consensus_core/
transaction.rs

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::BTreeMap, sync::Arc};

use mysten_common::debug_fatal;
use mysten_metrics::monitored_mpsc::{channel, Receiver, Sender};
use parking_lot::Mutex;
use tap::TapFallible;
use thiserror::Error;
use tokio::sync::oneshot;
use tracing::{error, warn};

use crate::{
    block::{BlockRef, Transaction, TransactionIndex},
    context::Context,
    Round,
};

/// The maximum number of transactions pending to the queue to be pulled for block proposal.
/// This is the capacity of the channel between `TransactionClient` and `TransactionConsumer`;
/// once full, further `submit` calls presumably back-pressure until the consumer drains guards.
const MAX_PENDING_TRANSACTIONS: usize = 2_000;

/// The guard acts as an acknowledgment mechanism for the inclusion of the transactions to a block.
/// When its last transaction is included in a block then `included_in_block_ack` will be signalled.
/// If the guard is dropped without getting acknowledged that means the transactions have not been
/// included in a block and the consensus is shutting down.
pub(crate) struct TransactionsGuard {
    // Holds a list of transactions to be included in the block.
    // A TransactionsGuard may be partially consumed by `TransactionConsumer`, in which case, this holds the remaining transactions.
    transactions: Vec<Transaction>,

    // Signalled with the `BlockRef` of the proposing block once the transactions are included,
    // along with a receiver for the eventual `BlockStatus` of that block.
    included_in_block_ack: oneshot::Sender<(BlockRef, oneshot::Receiver<BlockStatus>)>,
}

/// The TransactionConsumer is responsible for fetching the next transactions to be included for the block proposals.
/// The transactions are submitted to a channel which is shared between the TransactionConsumer and the TransactionClient
/// and are pulled every time the `next` method is called.
pub(crate) struct TransactionConsumer {
    context: Arc<Context>,
    // Receiving end of the channel fed by `TransactionClient::submit_no_wait`.
    tx_receiver: Receiver<TransactionsGuard>,
    // Upper bound on the total serialized bytes of transactions returned by a single `next` call.
    max_transactions_in_block_bytes: u64,
    // Upper bound on the number of transactions returned by a single `next` call.
    max_num_transactions_in_block: u64,
    // A guard that did not fit into the current proposal; it is retried first on the next `next` call.
    pending_transactions: Option<TransactionsGuard>,
    // Per-block senders notified with `BlockStatus` via `notify_own_blocks_status`.
    block_status_subscribers: Arc<Mutex<BTreeMap<BlockRef, Vec<oneshot::Sender<BlockStatus>>>>>,
}

/// The status of an own proposed block, reported back to transaction submitters.
#[derive(Debug, Clone, Eq, PartialEq)]
#[allow(unused)]
pub enum BlockStatus {
    /// The block has been sequenced as part of a committed sub dag. That means that any transaction that has been included in the block
    /// has been committed as well.
    Sequenced(BlockRef),
    /// The block has been garbage collected and will never be committed. Any transactions that have been included in the block should also
    /// be considered as impossible to be committed as part of this block and might need to be retried.
    GarbageCollected(BlockRef),
}

/// Indicates why `TransactionConsumer::next` stopped pulling transactions for the block.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum LimitReached {
    /// The maximum number of transactions has been included.
    MaxNumOfTransactions,
    /// The maximum number of bytes has been included.
    MaxBytes,
    /// All available transactions have been included.
    AllTransactionsIncluded,
}

impl TransactionConsumer {
    /// Creates a consumer that pulls submitted transactions from `tx_receiver`, with block limits
    /// taken from the protocol config found in `context`.
    pub(crate) fn new(tx_receiver: Receiver<TransactionsGuard>, context: Arc<Context>) -> Self {
        Self {
            tx_receiver,
            max_transactions_in_block_bytes: context
                .protocol_config
                .max_transactions_in_block_bytes(),
            max_num_transactions_in_block: context.protocol_config.max_num_transactions_in_block(),
            context,
            pending_transactions: None,
            block_status_subscribers: Arc::new(Mutex::new(BTreeMap::new())),
        }
    }

    // Attempts to fetch the next transactions that have been submitted for sequence. Respects the `max_transactions_in_block_bytes`
    // and `max_num_transactions_in_block` parameters specified via protocol config.
    // This returns one or more transactions to be included in the block and a callback to acknowledge the inclusion of those transactions.
    // Also returns a `LimitReached` enum to indicate which limit type has been reached.
    pub(crate) fn next(&mut self) -> (Vec<Transaction>, Box<dyn FnOnce(BlockRef)>, LimitReached) {
        let mut transactions = Vec::new();
        let mut acks = Vec::new();
        let mut total_bytes = 0;
        let mut limit_reached = LimitReached::AllTransactionsIncluded;

        // Handle one batch of incoming transactions from TransactionGuard.
        // The method will return `None` if all the transactions can be included in the block. Otherwise none of the transactions will be
        // included in the block and the method will return the TransactionGuard.
        // NOTE: a guard is consumed atomically — either all of its transactions fit, or the whole
        // guard is handed back to be retried for the next block.
        let mut handle_txs = |t: TransactionsGuard| -> Option<TransactionsGuard> {
            let transactions_bytes =
                t.transactions.iter().map(|t| t.data().len()).sum::<usize>() as u64;
            let transactions_num = t.transactions.len() as u64;

            if total_bytes + transactions_bytes > self.max_transactions_in_block_bytes {
                limit_reached = LimitReached::MaxBytes;
                return Some(t);
            }
            if transactions.len() as u64 + transactions_num > self.max_num_transactions_in_block {
                limit_reached = LimitReached::MaxNumOfTransactions;
                return Some(t);
            }

            total_bytes += transactions_bytes;

            // The transactions can be consumed, register its ack.
            acks.push(t.included_in_block_ack);
            transactions.extend(t.transactions);
            None
        };

        // A guard parked by a previous call is served first; it should always fit into an empty block.
        if let Some(t) = self.pending_transactions.take() {
            if let Some(pending_transactions) = handle_txs(t) {
                debug_fatal!("Previously pending transaction(s) should fit into an empty block! Dropping: {:?}", pending_transactions.transactions);
            }
        }

        // Until we have reached the limit for the pull.
        // We may have already reached limit in the first iteration above, in which case we stop immediately.
        while self.pending_transactions.is_none() {
            if let Ok(t) = self.tx_receiver.try_recv() {
                // A guard that does not fit is parked in `pending_transactions`, ending the loop.
                self.pending_transactions = handle_txs(t);
            } else {
                break;
            }
        }

        let block_status_subscribers = self.block_status_subscribers.clone();
        let gc_enabled = self.context.protocol_config.gc_depth() > 0;
        (
            transactions,
            // Ack callback: invoked with the `BlockRef` of the block that included the pulled
            // transactions. It signals each submitter and, when gc is enabled, registers them
            // for a later `BlockStatus` notification.
            Box::new(move |block_ref: BlockRef| {
                let mut block_status_subscribers = block_status_subscribers.lock();

                for ack in acks {
                    let (status_tx, status_rx) = oneshot::channel();

                    if gc_enabled {
                        block_status_subscribers
                            .entry(block_ref)
                            .or_default()
                            .push(status_tx);
                    } else {
                        // When gc is not enabled, then report directly the block as sequenced while tx is acknowledged for inclusion.
                        // As blocks can never get garbage collected, there is no reason to do otherwise and it is also safer for edge cases.
                        status_tx.send(BlockStatus::Sequenced(block_ref)).ok();
                    }

                    let _ = ack.send((block_ref, status_rx));
                }
            }),
            limit_reached,
        )
    }

    /// Notifies all the transaction submitters who are waiting to receive an update on the status of the block.
    /// The `committed_blocks` are the blocks that have been committed and the `gc_round` is the round up to which the blocks have been garbage collected.
    /// First we'll notify for all the committed blocks, and then for all the blocks that have been garbage collected.
    pub(crate) fn notify_own_blocks_status(
        &self,
        committed_blocks: Vec<BlockRef>,
        gc_round: Round,
    ) {
        // Notify for all the committed blocks first
        let mut block_status_subscribers = self.block_status_subscribers.lock();
        for block_ref in committed_blocks {
            if let Some(subscribers) = block_status_subscribers.remove(&block_ref) {
                subscribers.into_iter().for_each(|s| {
                    let _ = s.send(BlockStatus::Sequenced(block_ref));
                });
            }
        }

        // Now notify everyone <= gc_round that their block has been garbage collected and clean up the entries.
        // NOTE(review): this assumes `BlockRef`'s `Ord` sorts by round first, so popping from the
        // front of the BTreeMap visits the lowest rounds; the first entry above `gc_round` is
        // re-inserted and the loop stops — confirm ordering in `block.rs`.
        while let Some((block_ref, subscribers)) = block_status_subscribers.pop_first() {
            if block_ref.round <= gc_round {
                subscribers.into_iter().for_each(|s| {
                    let _ = s.send(BlockStatus::GarbageCollected(block_ref));
                });
            } else {
                block_status_subscribers.insert(block_ref, subscribers);
                break;
            }
        }
    }

    /// Test-only: registers a status subscriber for `block_ref` without going through `next`.
    #[cfg(test)]
    pub(crate) fn subscribe_for_block_status_testing(
        &self,
        block_ref: BlockRef,
    ) -> oneshot::Receiver<BlockStatus> {
        let (tx, rx) = oneshot::channel();
        let mut block_status_subscribers = self.block_status_subscribers.lock();
        block_status_subscribers
            .entry(block_ref)
            .or_default()
            .push(tx);
        rx
    }

    /// Test-only: returns true when no transactions are pending or queued.
    #[cfg(test)]
    fn is_empty(&mut self) -> bool {
        if self.pending_transactions.is_some() {
            return false;
        }
        // `try_recv` removes a guard from the channel, so park it to avoid losing it for `next`.
        if let Ok(t) = self.tx_receiver.try_recv() {
            self.pending_transactions = Some(t);
            return false;
        }
        true
    }
}

/// `TransactionClient` is the submission-side handle: it validates transaction sizes and counts
/// against protocol limits and forwards accepted bundles to the `TransactionConsumer` channel.
/// It derives `Clone`, so it can be handed to multiple submitters.
#[derive(Clone)]
pub struct TransactionClient {
    // Sending end of the channel drained by `TransactionConsumer`.
    sender: Sender<TransactionsGuard>,
    // Maximum allowed serialized size of a single transaction, in bytes.
    max_transaction_size: u64,
    // Maximum total byte size of one submitted bundle (must fit within a block).
    max_transactions_in_block_bytes: u64,
    // Maximum number of transactions in one submitted bundle.
    max_transactions_in_block_count: u64,
}

/// Errors returned by `TransactionClient` when submitting transactions.
#[derive(Debug, Error)]
pub enum ClientError {
    #[error("Failed to submit transaction, consensus is shutting down: {0}")]
    ConsensusShuttingDown(String),

    #[error("Transaction size ({0}B) is over limit ({1}B)")]
    OversizedTransaction(u64, u64),

    #[error("Transaction bundle size ({0}B) is over limit ({1}B)")]
    OversizedTransactionBundleBytes(u64, u64),

    #[error("Transaction bundle count ({0}) is over limit ({1})")]
    OversizedTransactionBundleCount(u64, u64),
}

impl TransactionClient {
    /// Builds a client/receiver pair that share a bounded channel of capacity
    /// `MAX_PENDING_TRANSACTIONS`. Size and count limits are read from the protocol config.
    pub(crate) fn new(context: Arc<Context>) -> (Self, Receiver<TransactionsGuard>) {
        let (sender, receiver) = channel("consensus_input", MAX_PENDING_TRANSACTIONS);
        let protocol_config = &context.protocol_config;
        let client = Self {
            sender,
            max_transaction_size: protocol_config.max_transaction_size_bytes(),
            max_transactions_in_block_bytes: protocol_config.max_transactions_in_block_bytes(),
            max_transactions_in_block_count: protocol_config.max_num_transactions_in_block(),
        };
        (client, receiver)
    }

    /// Submits a list of transactions to be sequenced and waits until they have been included
    /// in a proposed block, returning that block's `BlockRef` and a receiver for its eventual
    /// `BlockStatus`. Errors if validation fails or consensus is shutting down.
    pub async fn submit(
        &self,
        transactions: Vec<Vec<u8>>,
    ) -> Result<(BlockRef, oneshot::Receiver<BlockStatus>), ClientError> {
        // TODO: Support returning the block refs for transactions that span multiple blocks
        let ack_receiver = self.submit_no_wait(transactions).await?;
        ack_receiver
            .await
            .tap_err(|e| warn!("Transaction acknowledge failed with {:?}", e))
            .map_err(|e| ClientError::ConsensusShuttingDown(e.to_string()))
    }

    /// Submits a list of transactions to be sequenced without waiting for block inclusion.
    ///
    /// Validation (all-or-nothing — if any check fails, nothing is submitted):
    /// - the bundle may not exceed `max_transactions_in_block_count` transactions;
    /// - each transaction may not exceed `max_transaction_size` bytes;
    /// - the bundle's total bytes may not exceed `max_transactions_in_block_bytes`,
    ///   so the whole bundle can fit in a single block.
    ///
    /// On success, returns a receiver that resolves once the transactions are included in a
    /// proposed block; if the receiver errors, consensus is shutting down and the transactions
    /// were not included in any block.
    pub(crate) async fn submit_no_wait(
        &self,
        transactions: Vec<Vec<u8>>,
    ) -> Result<oneshot::Receiver<(BlockRef, oneshot::Receiver<BlockStatus>)>, ClientError> {
        let (ack_tx, ack_rx) = oneshot::channel();

        let num_transactions = transactions.len() as u64;
        if num_transactions > self.max_transactions_in_block_count {
            return Err(ClientError::OversizedTransactionBundleCount(
                num_transactions,
                self.max_transactions_in_block_count,
            ));
        }

        // Check per-transaction size first, then the running bundle total, mirroring the
        // error precedence callers observe.
        let mut total_bytes: u64 = 0;
        for transaction in &transactions {
            let size = transaction.len() as u64;
            if size > self.max_transaction_size {
                return Err(ClientError::OversizedTransaction(
                    size,
                    self.max_transaction_size,
                ));
            }
            total_bytes += size;
            if total_bytes > self.max_transactions_in_block_bytes {
                return Err(ClientError::OversizedTransactionBundleBytes(
                    total_bytes,
                    self.max_transactions_in_block_bytes,
                ));
            }
        }

        let guard = TransactionsGuard {
            transactions: transactions.into_iter().map(Transaction::new).collect(),
            included_in_block_ack: ack_tx,
        };
        self.sender
            .send(guard)
            .await
            .tap_err(|e| error!("Submit transactions failed with {:?}", e))
            .map_err(|e| ClientError::ConsensusShuttingDown(e.to_string()))?;
        Ok(ack_rx)
    }
}

/// `TransactionVerifier` implementation is supplied by Sui to validate transactions in a block,
/// before acceptance of the block.
pub trait TransactionVerifier: Send + Sync + 'static {
    /// Determines if this batch of transactions is valid.
    /// Fails if any one of the transactions is invalid.
    fn verify_batch(&self, batch: &[&[u8]]) -> Result<(), ValidationError>;

    /// Returns indices of transactions to reject, or a transaction validation error.
    /// Currently only uncertified user transactions can be voted to reject, which are created
    /// by Mysticeti fastpath client.
    /// Honest validators may disagree on voting for uncertified user transactions.
    /// The other types of transactions are implicitly voted to be accepted if they pass validation.
    ///
    /// Honest validators should produce the same validation outcome on the same batch of
    /// transactions. So if a batch from a peer fails validation, the peer is equivocating.
    ///
    /// Returned indices refer to positions within the given `batch` slice.
    fn verify_and_vote_batch(
        &self,
        batch: &[&[u8]],
    ) -> Result<Vec<TransactionIndex>, ValidationError>;
}

/// Error returned by `TransactionVerifier` implementations when a batch fails validation.
#[derive(Debug, Error)]
pub enum ValidationError {
    #[error("Invalid transaction: {0}")]
    InvalidTransaction(String),
}

/// `NoopTransactionVerifier` accepts all transactions.
#[cfg(any(test, msim))]
pub struct NoopTransactionVerifier;

#[cfg(any(test, msim))]
impl TransactionVerifier for NoopTransactionVerifier {
    // Accepts every batch unconditionally.
    fn verify_batch(&self, _batch: &[&[u8]]) -> Result<(), ValidationError> {
        Ok(())
    }

    // Rejects nothing: returns an empty list of indices to reject.
    fn verify_and_vote_batch(
        &self,
        _batch: &[&[u8]],
    ) -> Result<Vec<TransactionIndex>, ValidationError> {
        Ok(vec![])
    }
}

#[cfg(test)]
mod tests {
    use std::{sync::Arc, time::Duration};

    use consensus_config::AuthorityIndex;
    use futures::{stream::FuturesUnordered, StreamExt};
    use sui_protocol_config::ProtocolConfig;
    use tokio::time::timeout;

    use crate::transaction::NoopTransactionVerifier;
    use crate::{
        block::{BlockDigest, BlockRef},
        block_verifier::SignedBlockVerifier,
        context::Context,
        transaction::{BlockStatus, LimitReached, TransactionClient, TransactionConsumer},
    };

    /// Submits transactions individually, pulls them via the consumer, and verifies that
    /// inclusion waiters resolve only after the ack callback fires.
    #[tokio::test(flavor = "current_thread", start_paused = true)]
    async fn basic_submit_and_consume() {
        let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| {
            config.set_consensus_max_transaction_size_bytes_for_testing(2_000); // 2KB
            config.set_consensus_max_transactions_in_block_bytes_for_testing(2_000);
            config
        });

        let context = Arc::new(Context::new_for_test(4).0);
        let (client, tx_receiver) = TransactionClient::new(context.clone());
        let mut consumer = TransactionConsumer::new(tx_receiver, context.clone());

        // submit asynchronously the transactions and keep the waiters
        let mut included_in_block_waiters = FuturesUnordered::new();
        for i in 0..3 {
            let transaction =
                bcs::to_bytes(&format!("transaction {i}")).expect("Serialization should not fail.");
            let w = client
                .submit_no_wait(vec![transaction])
                .await
                // Fixed: previous message read "Shouldn't submit successfully", the inverse of the intent.
                .expect("Should submit transaction successfully");
            included_in_block_waiters.push(w);
        }

        // now pull the transactions from the consumer
        let (transactions, ack_transactions, _limit_reached) = consumer.next();
        assert_eq!(transactions.len(), 3);

        for (i, t) in transactions.iter().enumerate() {
            let t: String = bcs::from_bytes(t.data()).unwrap();
            assert_eq!(format!("transaction {i}").to_string(), t);
        }

        assert!(
            timeout(Duration::from_secs(1), included_in_block_waiters.next())
                .await
                .is_err(),
            "We should expect to timeout as none of the transactions have been acknowledged yet"
        );

        // Now acknowledge the inclusion of transactions
        ack_transactions(BlockRef::MIN);

        // Now make sure that all the waiters have returned
        while let Some(result) = included_in_block_waiters.next().await {
            assert!(result.is_ok());
        }

        // try to pull again transactions, result should be empty
        assert!(consumer.is_empty());
    }

    /// With gc enabled (gc_depth > 0), verifies that committed blocks report `Sequenced` and
    /// blocks at or below the gc round report `GarbageCollected`.
    #[tokio::test(flavor = "current_thread", start_paused = true)]
    async fn block_status_update_gc_enabled() {
        let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| {
            config.set_consensus_max_transaction_size_bytes_for_testing(2_000); // 2KB
            config.set_consensus_max_transactions_in_block_bytes_for_testing(2_000);
            config.set_consensus_gc_depth_for_testing(10);
            config
        });

        let context = Arc::new(Context::new_for_test(4).0);
        let (client, tx_receiver) = TransactionClient::new(context.clone());
        let mut consumer = TransactionConsumer::new(tx_receiver, context.clone());

        // submit the transactions and include 2 of each on a new block
        let mut included_in_block_waiters = FuturesUnordered::new();
        for i in 1..=10 {
            let transaction =
                bcs::to_bytes(&format!("transaction {i}")).expect("Serialization should not fail.");
            let w = client
                .submit_no_wait(vec![transaction])
                .await
                // Fixed: previous message read "Shouldn't submit successfully", the inverse of the intent.
                .expect("Should submit transaction successfully");
            included_in_block_waiters.push(w);

            // Every 2 transactions simulate the creation of a new block and acknowledge the inclusion of the transactions
            if i % 2 == 0 {
                let (transactions, ack_transactions, _limit_reached) = consumer.next();
                assert_eq!(transactions.len(), 2);
                ack_transactions(BlockRef::new(
                    i,
                    AuthorityIndex::new_for_test(0),
                    BlockDigest::MIN,
                ));
            }
        }

        // Now iterate over all the waiters. Everyone should have been acknowledged.
        let mut block_status_waiters = Vec::new();
        while let Some(result) = included_in_block_waiters.next().await {
            let (block_ref, block_status_waiter) =
                result.expect("Block inclusion waiter shouldn't fail");
            block_status_waiters.push((block_ref, block_status_waiter));
        }

        // Now acknowledge the commit of the blocks 6, 8, 10 and set gc_round = 5, which should trigger the garbage collection of blocks 1..=5
        let gc_round = 5;
        consumer.notify_own_blocks_status(
            vec![
                BlockRef::new(6, AuthorityIndex::new_for_test(0), BlockDigest::MIN),
                BlockRef::new(8, AuthorityIndex::new_for_test(0), BlockDigest::MIN),
                BlockRef::new(10, AuthorityIndex::new_for_test(0), BlockDigest::MIN),
            ],
            gc_round,
        );

        // Now iterate over all the block status waiters. Everyone should have been notified.
        for (block_ref, waiter) in block_status_waiters {
            let block_status = waiter.await.expect("Block status waiter shouldn't fail");

            if block_ref.round <= gc_round {
                assert!(matches!(block_status, BlockStatus::GarbageCollected(_)))
            } else {
                assert!(matches!(block_status, BlockStatus::Sequenced(_)));
            }
        }

        // Ensure internal structure is clear
        assert!(consumer.block_status_subscribers.lock().is_empty());
    }

    /// With gc disabled (gc_depth == 0), every acknowledged block reports `Sequenced`
    /// immediately and no status subscribers are retained.
    #[tokio::test(flavor = "current_thread", start_paused = true)]
    async fn block_status_update_gc_disabled() {
        let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| {
            config.set_consensus_max_transaction_size_bytes_for_testing(2_000); // 2KB
            config.set_consensus_max_transactions_in_block_bytes_for_testing(2_000);
            config.set_consensus_gc_depth_for_testing(0);
            config
        });

        let context = Arc::new(Context::new_for_test(4).0);
        let (client, tx_receiver) = TransactionClient::new(context.clone());
        let mut consumer = TransactionConsumer::new(tx_receiver, context.clone());

        // submit the transactions and include 2 of each on a new block
        let mut included_in_block_waiters = FuturesUnordered::new();
        for i in 1..=10 {
            let transaction =
                bcs::to_bytes(&format!("transaction {i}")).expect("Serialization should not fail.");
            let w = client
                .submit_no_wait(vec![transaction])
                .await
                // Fixed: previous message read "Shouldn't submit successfully", the inverse of the intent.
                .expect("Should submit transaction successfully");
            included_in_block_waiters.push(w);

            // Every 2 transactions simulate the creation of a new block and acknowledge the inclusion of the transactions
            if i % 2 == 0 {
                let (transactions, ack_transactions, _limit_reached) = consumer.next();
                assert_eq!(transactions.len(), 2);
                ack_transactions(BlockRef::new(
                    i,
                    AuthorityIndex::new_for_test(0),
                    BlockDigest::MIN,
                ));
            }
        }

        // Now iterate over all the waiters. Everyone should have been acknowledged.
        let mut block_status_waiters = Vec::new();
        while let Some(result) = included_in_block_waiters.next().await {
            let (block_ref, block_status_waiter) =
                result.expect("Block inclusion waiter shouldn't fail");
            block_status_waiters.push((block_ref, block_status_waiter));
        }

        // Now iterate over all the block status waiters. Everyone should have been notified and everyone should be considered sequenced.
        for (_block_ref, waiter) in block_status_waiters {
            let block_status = waiter.await.expect("Block status waiter shouldn't fail");
            assert!(matches!(block_status, BlockStatus::Sequenced(_)));
        }

        // Ensure internal structure is clear
        assert!(consumer.block_status_subscribers.lock().is_empty());
    }

    /// Submits more transactions than fit in one block and verifies the consumer splits them
    /// across consecutive `next` calls while respecting the per-block byte limit and preserving
    /// submission order.
    #[tokio::test]
    async fn submit_over_max_fetch_size_and_consume() {
        let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| {
            config.set_consensus_max_transaction_size_bytes_for_testing(100);
            config.set_consensus_max_transactions_in_block_bytes_for_testing(100);
            config
        });

        let context = Arc::new(Context::new_for_test(4).0);
        let (client, tx_receiver) = TransactionClient::new(context.clone());
        let mut consumer = TransactionConsumer::new(tx_receiver, context.clone());

        // submit some transactions
        for i in 0..10 {
            let transaction =
                bcs::to_bytes(&format!("transaction {i}")).expect("Serialization should not fail.");
            let _w = client
                .submit_no_wait(vec![transaction])
                .await
                // Fixed: previous message read "Shouldn't submit successfully", the inverse of the intent.
                .expect("Should submit transaction successfully");
        }

        // now pull the transactions from the consumer
        let mut all_transactions = Vec::new();
        let (transactions, _ack_transactions, _limit_reached) = consumer.next();
        assert_eq!(transactions.len(), 7);

        // ensure their total size is within `max_transactions_in_block_bytes`
        let total_size: u64 = transactions.iter().map(|t| t.data().len() as u64).sum();
        assert!(
            total_size <= context.protocol_config.max_transactions_in_block_bytes(),
            "Should have fetched transactions up to {}",
            context.protocol_config.max_transactions_in_block_bytes()
        );
        all_transactions.extend(transactions);

        // try to pull again transactions, next should be provided
        let (transactions, _ack_transactions, _limit_reached) = consumer.next();
        assert_eq!(transactions.len(), 3);

        // ensure their total size is within `max_transactions_in_block_bytes`
        let total_size: u64 = transactions.iter().map(|t| t.data().len() as u64).sum();
        assert!(
            total_size <= context.protocol_config.max_transactions_in_block_bytes(),
            "Should have fetched transactions up to {}",
            context.protocol_config.max_transactions_in_block_bytes()
        );
        all_transactions.extend(transactions);

        // try to pull again transactions, result should be empty
        assert!(consumer.is_empty());

        for (i, t) in all_transactions.iter().enumerate() {
            let t: String = bcs::from_bytes(t.data()).unwrap();
            assert_eq!(format!("transaction {i}").to_string(), t);
        }
    }

    #[tokio::test]
    async fn submit_large_batch_and_ack() {
        // Tighten the protocol limits so that small string transactions exercise
        // both the per-transaction size cap and the per-block byte cap.
        let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| {
            config.set_consensus_max_transaction_size_bytes_for_testing(15);
            config.set_consensus_max_transactions_in_block_bytes_for_testing(200);
            config
        });

        let context = Arc::new(Context::new_for_test(4).0);
        let (client, tx_receiver) = TransactionClient::new(context.clone());
        let mut consumer = TransactionConsumer::new(tx_receiver, context.clone());
        let mut all_receivers = Vec::new();

        // Submit a few transactions individually.
        for i in 0..10 {
            let transaction =
                bcs::to_bytes(&format!("transaction {i}")).expect("Serialization should not fail.");
            let w = client
                .submit_no_wait(vec![transaction])
                .await
                .expect("Should submit transaction successfully");
            all_receivers.push(w);
        }

        // Construct an acceptable batch and submit; it should be accepted.
        {
            let transactions: Vec<_> = (10..15)
                .map(|i| {
                    bcs::to_bytes(&format!("transaction {i}"))
                        .expect("Serialization should not fail.")
                })
                .collect();
            let w = client
                .submit_no_wait(transactions)
                .await
                .expect("Should submit transaction batch successfully");
            all_receivers.push(w);
        }

        // Submit another individual transaction.
        {
            let i = 15;
            let transaction =
                bcs::to_bytes(&format!("transaction {i}")).expect("Serialization should not fail.");
            let w = client
                .submit_no_wait(vec![transaction])
                .await
                // Fixed: the previous expect message ("Shouldn't submit...") was
                // inverted — this submission is expected to succeed.
                .expect("Should submit transaction successfully");
            all_receivers.push(w);
        }

        // Construct an over-size-limit batch and submit; it should be rejected.
        {
            let transactions: Vec<_> = (16..32)
                .map(|i| {
                    bcs::to_bytes(&format!("transaction {i}"))
                        .expect("Serialization should not fail.")
                })
                .collect();
            let result = client.submit_no_wait(transactions).await.unwrap_err();
            assert_eq!(
                result.to_string(),
                "Transaction bundle size (210B) is over limit (200B)"
            );
        }

        // Now pull the transactions from the consumer.
        // We expect all transactions to be fetched in order, none missing, and
        // never exceeding the block size limits.
        let mut all_acks: Vec<Box<dyn FnOnce(BlockRef)>> = Vec::new();
        let mut batch_index = 0;
        while !consumer.is_empty() {
            let (transactions, ack_transactions, _limit_reached) = consumer.next();

            assert!(
                transactions.len() as u64
                    <= context.protocol_config.max_num_transactions_in_block(),
                "Should have fetched transactions up to {}",
                context.protocol_config.max_num_transactions_in_block()
            );

            let total_size: u64 = transactions.iter().map(|t| t.data().len() as u64).sum();
            assert!(
                total_size <= context.protocol_config.max_transactions_in_block_bytes(),
                "Should have fetched transactions up to {}",
                context.protocol_config.max_transactions_in_block_bytes()
            );

            // First batch should contain all transactions from 0..10. The soft
            // bundle is too big to fit as well, so it is parked.
            if batch_index == 0 {
                assert_eq!(transactions.len(), 10);
                for (i, transaction) in transactions.iter().enumerate() {
                    let t: String = bcs::from_bytes(transaction.data()).unwrap();
                    // Removed redundant `.to_string()` on the `format!` result.
                    assert_eq!(format!("transaction {i}"), t);
                }
            // Second batch will contain the soft bundle and the additional last transaction.
            } else if batch_index == 1 {
                assert_eq!(transactions.len(), 6);
                for (i, transaction) in transactions.iter().enumerate() {
                    let t: String = bcs::from_bytes(transaction.data()).unwrap();
                    assert_eq!(format!("transaction {}", i + 10), t);
                }
            } else {
                panic!("Unexpected batch index");
            }

            batch_index += 1;

            all_acks.push(ack_transactions);
        }

        // Now acknowledge the inclusion of all transactions.
        for ack in all_acks {
            ack(BlockRef::MIN);
        }

        // Expect all receivers to be resolved.
        for w in all_receivers {
            let r = w.await;
            assert!(r.is_ok());
        }
    }

    #[tokio::test]
    async fn test_submit_over_max_block_size_and_validate_block_size() {
        // Case 1: enough individual transactions are queued that the
        // max-number-of-transactions-per-block limit (10) is hit first.
        {
            let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| {
                config.set_consensus_max_transaction_size_bytes_for_testing(100);
                config.set_consensus_max_num_transactions_in_block_for_testing(10);
                config.set_consensus_max_transactions_in_block_bytes_for_testing(300);
                config
            });

            let context = Arc::new(Context::new_for_test(4).0);
            let (client, tx_receiver) = TransactionClient::new(context.clone());
            let mut consumer = TransactionConsumer::new(tx_receiver, context.clone());
            let mut receivers = Vec::new();

            // Queue twice as many transactions as can fit into a single block.
            let max_num = context.protocol_config.max_num_transactions_in_block();
            for i in 0..2 * max_num {
                let serialized = bcs::to_bytes(&format!("transaction {i}"))
                    .expect("Serialization should not fail.");
                receivers.push(
                    client
                        .submit_no_wait(vec![serialized])
                        .await
                        .expect("Should submit successfully transaction"),
                );
            }

            // Pull one block's worth; the transaction-count limit should trigger.
            let (transactions, _ack_transactions, limit) = consumer.next();
            assert_eq!(limit, LimitReached::MaxNumOfTransactions);
            assert_eq!(transactions.len() as u64, max_num);

            // A block assembled from the fetched transactions must pass verification.
            let block_verifier =
                SignedBlockVerifier::new(context.clone(), Arc::new(NoopTransactionVerifier {}));
            let batch: Vec<_> = transactions.iter().map(|t| t.data()).collect();
            assert!(
                block_verifier.check_transactions(&batch).is_ok(),
                "Number of transactions limit verification failed"
            );
        }

        // Case 2: a generous count limit but a small byte budget, so the
        // max-block-bytes limit (300) is hit first.
        {
            let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| {
                config.set_consensus_max_transaction_size_bytes_for_testing(100);
                config.set_consensus_max_num_transactions_in_block_for_testing(1_000);
                config.set_consensus_max_transactions_in_block_bytes_for_testing(300);
                config
            });

            let context = Arc::new(Context::new_for_test(4).0);
            let (client, tx_receiver) = TransactionClient::new(context.clone());
            let mut consumer = TransactionConsumer::new(tx_receiver, context.clone());
            let mut receivers = Vec::new();

            // Keep submitting identical transactions until twice the block byte
            // budget has been queued.
            let max_bytes = context.protocol_config.max_transactions_in_block_bytes();
            let mut submitted_bytes = 0u64;
            while submitted_bytes < 2 * max_bytes {
                let serialized = bcs::to_bytes(&"transaction".to_string())
                    .expect("Serialization should not fail.");
                submitted_bytes += serialized.len() as u64;
                receivers.push(
                    client
                        .submit_no_wait(vec![serialized])
                        .await
                        .expect("Should submit successfully transaction"),
                );
            }

            // Pull one block's worth; the byte limit should be what stopped the fetch.
            let (transactions, _ack_transactions, limit) = consumer.next();
            let batch: Vec<_> = transactions.iter().map(|t| t.data()).collect();
            let batch_bytes = batch.iter().map(|t| t.len() as u64).sum::<u64>();

            assert_eq!(limit, LimitReached::MaxBytes);
            assert!(
                batch.len()
                    < context
                        .protocol_config
                        .consensus_max_num_transactions_in_block() as usize,
                "Should have submitted less than the max number of transactions in a block"
            );
            assert!(batch_bytes <= max_bytes);

            // A block assembled from the fetched transactions must pass verification.
            let block_verifier =
                SignedBlockVerifier::new(context.clone(), Arc::new(NoopTransactionVerifier {}));
            assert!(
                block_verifier.check_transactions(&batch).is_ok(),
                "Total size of transactions limit verification failed"
            );
        }
    }
}