mod causal_order;
pub mod checkpoint_executor;
mod checkpoint_output;
mod metrics;

use crate::accumulators::{self, AccumulatorSettlementTxBuilder};
use crate::authority::AuthorityState;
use crate::authority::epoch_start_configuration::EpochStartConfigTrait;
use crate::authority_client::{AuthorityAPI, make_network_authority_clients_with_network_config};
use crate::checkpoints::causal_order::CausalOrder;
use crate::checkpoints::checkpoint_output::{CertifiedCheckpointOutput, CheckpointOutput};
pub use crate::checkpoints::checkpoint_output::{
    LogCheckpointOutput, SendCheckpointToStateSync, SubmitCheckpointToConsensus,
};
pub use crate::checkpoints::metrics::CheckpointMetrics;
use crate::consensus_manager::ReplayWaiter;
use crate::execution_cache::TransactionCacheRead;

use crate::execution_scheduler::funds_withdraw_scheduler::FundsSettlement;
use crate::global_state_hasher::GlobalStateHasher;
use crate::stake_aggregator::{InsertResult, MultiStakeAggregator};
use consensus_core::CommitRef;
use diffy::create_patch;
use itertools::Itertools;
use mysten_common::random::get_rng;
use mysten_common::sync::notify_read::{CHECKPOINT_BUILDER_NOTIFY_READ_TASK_NAME, NotifyRead};
use mysten_common::{assert_reachable, debug_fatal, fatal, in_antithesis};
use mysten_metrics::{MonitoredFutureExt, monitored_scope, spawn_monitored_task};
use nonempty::NonEmpty;
use parking_lot::Mutex;
use pin_project_lite::pin_project;
use serde::{Deserialize, Serialize};
use sui_macros::fail_point_arg;
use sui_network::default_mysten_network_config;
use sui_types::SUI_ACCUMULATOR_ROOT_OBJECT_ID;
use sui_types::base_types::{ConciseableName, SequenceNumber};
use sui_types::executable_transaction::VerifiedExecutableTransaction;
use sui_types::execution::ExecutionTimeObservationKey;
use sui_types::messages_checkpoint::{
    CheckpointArtifacts, CheckpointCommitment, VersionedFullCheckpointContents,
};
use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait;
use tokio::sync::{mpsc, watch};
use typed_store::rocks::{DBOptions, ReadWriteOptions, default_db_options};

use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore;
use crate::authority::authority_store_pruner::PrunerWatermarks;
use crate::consensus_handler::SequencedConsensusTransactionKey;
use rand::seq::SliceRandom;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::fs::File;
use std::future::Future;
use std::io::Write;
use std::path::Path;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::Weak;
use std::task::{Context, Poll};
use std::time::{Duration, SystemTime};
use sui_protocol_config::ProtocolVersion;
use sui_types::base_types::{AuthorityName, EpochId, TransactionDigest};
use sui_types::committee::StakeUnit;
use sui_types::crypto::AuthorityStrongQuorumSignInfo;
use sui_types::digests::{
    CheckpointContentsDigest, CheckpointDigest, Digest, TransactionEffectsDigest,
};
use sui_types::effects::{TransactionEffects, TransactionEffectsAPI};
use sui_types::error::{SuiErrorKind, SuiResult};
use sui_types::gas::GasCostSummary;
use sui_types::message_envelope::Message;
use sui_types::messages_checkpoint::{
    CertifiedCheckpointSummary, CheckpointContents, CheckpointResponseV2, CheckpointSequenceNumber,
    CheckpointSignatureMessage, CheckpointSummary, CheckpointSummaryResponse, CheckpointTimestamp,
    EndOfEpochData, FullCheckpointContents, TrustedCheckpoint, VerifiedCheckpoint,
    VerifiedCheckpointContents,
};
use sui_types::messages_checkpoint::{CheckpointRequestV2, SignedCheckpointSummary};
use sui_types::messages_consensus::ConsensusTransactionKey;
use sui_types::signature::GenericSignature;
use sui_types::sui_system_state::{SuiSystemState, SuiSystemStateTrait};
use sui_types::transaction::{
    TransactionDataAPI, TransactionKey, TransactionKind, VerifiedTransaction,
};
use tokio::{sync::Notify, time::timeout};
use tracing::{debug, error, info, instrument, trace, warn};
use typed_store::DBMapUtils;
use typed_store::Map;
use typed_store::{
    TypedStoreError,
    rocks::{DBMap, MetricConf},
};

const TRANSACTION_FORK_DETECTED_KEY: u8 = 0;

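/// Commit height at which a pending checkpoint is created by the consensus
/// handler; distinct from the `CheckpointSequenceNumber` assigned to the
/// checkpoint once it is built.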
pub type CheckpointHeight = u64;

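/// Aggregate statistics for an epoch, derived from its first and last
/// checkpoints (see `CheckpointStore::get_epoch_stats`).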
pub struct EpochStats {
    pub checkpoint_count: u64,
    pub transaction_count: u64,
    pub total_gas_reward: u64,
}

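/// Metadata shared by all pending checkpoint variants: the commit timestamp,
/// the consensus commit the checkpoint was derived from, its commit height,
/// and whether it is the last checkpoint of the epoch.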
#[derive(Clone, Debug)]
pub struct PendingCheckpointInfo {
    pub timestamp_ms: CheckpointTimestamp,
    pub last_of_epoch: bool,
    pub checkpoint_height: CheckpointHeight,
    pub consensus_commit_ref: CommitRef,
    pub rejected_transactions_digest: Digest,
}

#[derive(Clone, Debug)]
pub struct PendingCheckpoint {
    pub roots: Vec<TransactionKey>,
    pub details: PendingCheckpointInfo,
}

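/// Root transactions for one checkpoint when checkpoints are split in the
/// consensus handler: ordinary transaction roots, an optional accumulator
/// settlement root, and the commit height they were produced at.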
#[derive(Clone, Debug, Default)]
pub struct CheckpointRoots {
    pub tx_roots: Vec<TransactionKey>,
    pub settlement_root: Option<TransactionKey>,
    pub height: CheckpointHeight,
}

#[derive(Clone, Debug)]
pub struct PendingCheckpointV2 {
    pub roots: Vec<CheckpointRoots>,
    pub details: PendingCheckpointInfo,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BuilderCheckpointSummary {
    pub summary: CheckpointSummary,
    pub checkpoint_height: Option<CheckpointHeight>,
    pub position_in_commit: usize,
}

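/// Column families backing the checkpoint store: checkpoint contents,
/// certified and locally computed summaries, watermarks, and fork-detection
/// records.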
#[derive(DBMapUtils)]
#[cfg_attr(tidehunter, tidehunter)]
pub struct CheckpointStoreTables {
    pub(crate) checkpoint_content: DBMap<CheckpointContentsDigest, CheckpointContents>,

    pub(crate) checkpoint_sequence_by_contents_digest:
        DBMap<CheckpointContentsDigest, CheckpointSequenceNumber>,

    #[default_options_override_fn = "full_checkpoint_content_table_default_config"]
    full_checkpoint_content: DBMap<CheckpointSequenceNumber, FullCheckpointContents>,

    pub(crate) certified_checkpoints: DBMap<CheckpointSequenceNumber, TrustedCheckpoint>,

    pub(crate) checkpoint_by_digest: DBMap<CheckpointDigest, TrustedCheckpoint>,

    pub(crate) locally_computed_checkpoints: DBMap<CheckpointSequenceNumber, CheckpointSummary>,

    epoch_last_checkpoint_map: DBMap<EpochId, CheckpointSequenceNumber>,

    pub(crate) watermarks: DBMap<CheckpointWatermark, (CheckpointSequenceNumber, CheckpointDigest)>,

    pub(crate) transaction_fork_detected: DBMap<
        u8,
        (
            TransactionDigest,
            TransactionEffectsDigest,
            TransactionEffectsDigest,
        ),
    >,
    #[default_options_override_fn = "full_checkpoint_content_table_default_config"]
    full_checkpoint_content_v2: DBMap<CheckpointSequenceNumber, VersionedFullCheckpointContents>,
}

fn full_checkpoint_content_table_default_config() -> DBOptions {
    DBOptions {
        options: default_db_options().options,
        rw_options: ReadWriteOptions::default().set_log_value_hash(true),
    }
}

impl CheckpointStoreTables {
    #[cfg(not(tidehunter))]
    pub fn new(path: &Path, metric_name: &'static str, _: Arc<PrunerWatermarks>) -> Self {
        Self::open_tables_read_write(path.to_path_buf(), MetricConf::new(metric_name), None, None)
    }

    #[cfg(tidehunter)]
    pub fn new(
        path: &Path,
        metric_name: &'static str,
        pruner_watermarks: Arc<PrunerWatermarks>,
    ) -> Self {
        tracing::warn!("Checkpoint DB using tidehunter");
        use crate::authority::authority_store_pruner::apply_relocation_filter;
        use typed_store::tidehunter_util::{
            Decision, KeySpaceConfig, KeyType, ThConfig, default_cells_per_mutex,
            default_mutex_count, default_value_cache_size,
        };
        let mutexes = default_mutex_count() * 4;
        let u64_sequence_key = KeyType::from_prefix_bits(6 * 8);
        let override_dirty_keys_config = KeySpaceConfig::new()
            .with_max_dirty_keys(64_000)
            .with_value_cache_size(default_value_cache_size());
        let config_u64 = ThConfig::new_with_config(
            8,
            mutexes,
            u64_sequence_key,
            override_dirty_keys_config.clone(),
        );
        let digest_config = ThConfig::new_with_rm_prefix(
            32,
            mutexes,
            KeyType::uniform(default_cells_per_mutex()),
            KeySpaceConfig::default(),
            vec![0, 0, 0, 0, 0, 0, 0, 32],
        );
        let watermarks_config = KeySpaceConfig::new()
            .with_value_cache_size(10)
            .disable_unload();
        let lru_config = KeySpaceConfig::new().with_value_cache_size(default_value_cache_size());
        let configs = vec![
            (
                "checkpoint_content",
                digest_config.clone().with_config(
                    lru_config
                        .clone()
                        .with_relocation_filter(|_, _| Decision::Remove),
                ),
            ),
            (
                "checkpoint_sequence_by_contents_digest",
                digest_config.clone().with_config(apply_relocation_filter(
                    KeySpaceConfig::default(),
                    pruner_watermarks.checkpoint_id.clone(),
                    |sequence_number: CheckpointSequenceNumber| sequence_number,
                    false,
                )),
            ),
            (
                "full_checkpoint_content",
                config_u64.clone().with_config(apply_relocation_filter(
                    override_dirty_keys_config.clone(),
                    pruner_watermarks.checkpoint_id.clone(),
                    |sequence_number: CheckpointSequenceNumber| sequence_number,
                    true,
                )),
            ),
            ("certified_checkpoints", config_u64.clone()),
            (
                "checkpoint_by_digest",
                digest_config.clone().with_config(apply_relocation_filter(
                    lru_config,
                    pruner_watermarks.epoch_id.clone(),
                    |checkpoint: TrustedCheckpoint| checkpoint.inner().epoch,
                    false,
                )),
            ),
            (
                "locally_computed_checkpoints",
                config_u64.clone().with_config(apply_relocation_filter(
                    override_dirty_keys_config.clone(),
                    pruner_watermarks.checkpoint_id.clone(),
                    |checkpoint_id: CheckpointSequenceNumber| checkpoint_id,
                    true,
                )),
            ),
            ("epoch_last_checkpoint_map", config_u64.clone()),
            (
                "watermarks",
                ThConfig::new_with_config(4, 1, KeyType::uniform(1), watermarks_config.clone()),
            ),
            (
                "transaction_fork_detected",
                ThConfig::new_with_config(
                    1,
                    1,
                    KeyType::uniform(1),
                    watermarks_config.with_relocation_filter(|_, _| Decision::Remove),
                ),
            ),
            (
                "full_checkpoint_content_v2",
                config_u64.clone().with_config(apply_relocation_filter(
                    override_dirty_keys_config.clone(),
                    pruner_watermarks.checkpoint_id.clone(),
                    |sequence_number: CheckpointSequenceNumber| sequence_number,
                    true,
                )),
            ),
        ];
        Self::open_tables_read_write(
            path.to_path_buf(),
            MetricConf::new(metric_name),
            configs
                .into_iter()
                .map(|(cf, config)| (cf.to_string(), config))
                .collect(),
        )
    }

    pub fn open_readonly(path: &Path) -> CheckpointStoreTablesReadOnly {
        Self::get_read_only_handle(
            path.to_path_buf(),
            None,
            None,
            MetricConf::new("checkpoint_readonly"),
        )
    }
}

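/// Persistent store for checkpoints, wrapping `CheckpointStoreTables` and
/// providing notify-read support for synced and executed checkpoints.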
pub struct CheckpointStore {
    pub(crate) tables: CheckpointStoreTables,
    synced_checkpoint_notify_read: NotifyRead<CheckpointSequenceNumber, VerifiedCheckpoint>,
    executed_checkpoint_notify_read: NotifyRead<CheckpointSequenceNumber, VerifiedCheckpoint>,
}

impl CheckpointStore {
    pub fn new(path: &Path, pruner_watermarks: Arc<PrunerWatermarks>) -> Arc<Self> {
        let tables = CheckpointStoreTables::new(path, "checkpoint", pruner_watermarks);
        Arc::new(Self {
            tables,
            synced_checkpoint_notify_read: NotifyRead::new(),
            executed_checkpoint_notify_read: NotifyRead::new(),
        })
    }

    pub fn new_for_tests() -> Arc<Self> {
        let ckpt_dir = mysten_common::tempdir().unwrap();
        CheckpointStore::new(ckpt_dir.path(), Arc::new(PrunerWatermarks::default()))
    }

    pub fn new_for_db_checkpoint_handler(path: &Path) -> Arc<Self> {
        let tables = CheckpointStoreTables::new(
            path,
            "db_checkpoint",
            Arc::new(PrunerWatermarks::default()),
        );
        Arc::new(Self {
            tables,
            synced_checkpoint_notify_read: NotifyRead::new(),
            executed_checkpoint_notify_read: NotifyRead::new(),
        })
    }

    pub fn open_readonly(path: &Path) -> CheckpointStoreTablesReadOnly {
        CheckpointStoreTables::open_readonly(path)
    }

    #[instrument(level = "info", skip_all)]
    pub fn insert_genesis_checkpoint(
        &self,
        checkpoint: VerifiedCheckpoint,
        contents: CheckpointContents,
        epoch_store: &AuthorityPerEpochStore,
    ) {
        assert_eq!(
            checkpoint.epoch(),
            0,
            "can't call insert_genesis_checkpoint with a checkpoint not in epoch 0"
        );
        assert_eq!(
            *checkpoint.sequence_number(),
            0,
            "can't call insert_genesis_checkpoint with a checkpoint that doesn't have a sequence number of 0"
        );

        match self.get_checkpoint_by_sequence_number(0).unwrap() {
            Some(existing_checkpoint) => {
                assert_eq!(existing_checkpoint.digest(), checkpoint.digest())
            }
            None => {
                if epoch_store.epoch() == checkpoint.epoch {
                    epoch_store
                        .put_genesis_checkpoint_in_builder(checkpoint.data(), &contents)
                        .unwrap();
                } else {
                    debug!(
                        validator_epoch =% epoch_store.epoch(),
                        genesis_epoch =% checkpoint.epoch(),
                        "Not inserting checkpoint builder data for genesis checkpoint",
                    );
                }
                self.insert_checkpoint_contents(contents).unwrap();
                self.insert_verified_checkpoint(&checkpoint).unwrap();
                self.update_highest_synced_checkpoint(&checkpoint).unwrap();
            }
        }
    }

    pub fn get_checkpoint_by_digest(
        &self,
        digest: &CheckpointDigest,
    ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
        self.tables
            .checkpoint_by_digest
            .get(digest)
            .map(|maybe_checkpoint| maybe_checkpoint.map(|c| c.into()))
    }

    pub fn get_checkpoint_by_sequence_number(
        &self,
        sequence_number: CheckpointSequenceNumber,
    ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
        self.tables
            .certified_checkpoints
            .get(&sequence_number)
            .map(|maybe_checkpoint| maybe_checkpoint.map(|c| c.into()))
    }

    pub fn get_locally_computed_checkpoint(
        &self,
        sequence_number: CheckpointSequenceNumber,
    ) -> Result<Option<CheckpointSummary>, TypedStoreError> {
        self.tables
            .locally_computed_checkpoints
            .get(&sequence_number)
    }

    pub fn multi_get_locally_computed_checkpoints(
        &self,
        sequence_numbers: &[CheckpointSequenceNumber],
    ) -> Result<Vec<Option<CheckpointSummary>>, TypedStoreError> {
        let checkpoints = self
            .tables
            .locally_computed_checkpoints
            .multi_get(sequence_numbers)?;

        Ok(checkpoints)
    }

    pub fn get_sequence_number_by_contents_digest(
        &self,
        digest: &CheckpointContentsDigest,
    ) -> Result<Option<CheckpointSequenceNumber>, TypedStoreError> {
        self.tables
            .checkpoint_sequence_by_contents_digest
            .get(digest)
    }

    pub fn delete_contents_digest_sequence_number_mapping(
        &self,
        digest: &CheckpointContentsDigest,
    ) -> Result<(), TypedStoreError> {
        self.tables
            .checkpoint_sequence_by_contents_digest
            .remove(digest)
    }

    pub fn get_latest_certified_checkpoint(
        &self,
    ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
        Ok(self
            .tables
            .certified_checkpoints
            .reversed_safe_iter_with_bounds(None, None)?
            .next()
            .transpose()?
            .map(|(_, v)| v.into()))
    }

    pub fn get_latest_locally_computed_checkpoint(
        &self,
    ) -> Result<Option<CheckpointSummary>, TypedStoreError> {
        Ok(self
            .tables
            .locally_computed_checkpoints
            .reversed_safe_iter_with_bounds(None, None)?
            .next()
            .transpose()?
            .map(|(_, v)| v))
    }

    pub fn multi_get_checkpoint_by_sequence_number(
        &self,
        sequence_numbers: &[CheckpointSequenceNumber],
    ) -> Result<Vec<Option<VerifiedCheckpoint>>, TypedStoreError> {
        let checkpoints = self
            .tables
            .certified_checkpoints
            .multi_get(sequence_numbers)?
            .into_iter()
            .map(|maybe_checkpoint| maybe_checkpoint.map(|c| c.into()))
            .collect();

        Ok(checkpoints)
    }

    pub fn multi_get_checkpoint_content(
        &self,
        contents_digest: &[CheckpointContentsDigest],
    ) -> Result<Vec<Option<CheckpointContents>>, TypedStoreError> {
        self.tables.checkpoint_content.multi_get(contents_digest)
    }

    pub fn get_highest_verified_checkpoint(
        &self,
    ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
        let highest_verified = if let Some(highest_verified) = self
            .tables
            .watermarks
            .get(&CheckpointWatermark::HighestVerified)?
        {
            highest_verified
        } else {
            return Ok(None);
        };
        self.get_checkpoint_by_digest(&highest_verified.1)
    }

    pub fn get_highest_synced_checkpoint(
        &self,
    ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
        let highest_synced = if let Some(highest_synced) = self
            .tables
            .watermarks
            .get(&CheckpointWatermark::HighestSynced)?
        {
            highest_synced
        } else {
            return Ok(None);
        };
        self.get_checkpoint_by_digest(&highest_synced.1)
    }

    pub fn get_highest_synced_checkpoint_seq_number(
        &self,
    ) -> Result<Option<CheckpointSequenceNumber>, TypedStoreError> {
        if let Some(highest_synced) = self
            .tables
            .watermarks
            .get(&CheckpointWatermark::HighestSynced)?
        {
            Ok(Some(highest_synced.0))
        } else {
            Ok(None)
        }
    }

    pub fn get_highest_executed_checkpoint_seq_number(
        &self,
    ) -> Result<Option<CheckpointSequenceNumber>, TypedStoreError> {
        if let Some(highest_executed) = self
            .tables
            .watermarks
            .get(&CheckpointWatermark::HighestExecuted)?
        {
            Ok(Some(highest_executed.0))
        } else {
            Ok(None)
        }
    }

    pub fn get_highest_executed_checkpoint(
        &self,
    ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
        let highest_executed = if let Some(highest_executed) = self
            .tables
            .watermarks
            .get(&CheckpointWatermark::HighestExecuted)?
        {
            highest_executed
        } else {
            return Ok(None);
        };
        self.get_checkpoint_by_digest(&highest_executed.1)
    }

    pub fn get_highest_pruned_checkpoint_seq_number(
        &self,
    ) -> Result<Option<CheckpointSequenceNumber>, TypedStoreError> {
        self.tables
            .watermarks
            .get(&CheckpointWatermark::HighestPruned)
            .map(|watermark| watermark.map(|w| w.0))
    }

    pub fn get_checkpoint_contents(
        &self,
        digest: &CheckpointContentsDigest,
    ) -> Result<Option<CheckpointContents>, TypedStoreError> {
        self.tables.checkpoint_content.get(digest)
    }

    pub fn get_full_checkpoint_contents_by_sequence_number(
        &self,
        seq: CheckpointSequenceNumber,
    ) -> Result<Option<VersionedFullCheckpointContents>, TypedStoreError> {
        self.tables.full_checkpoint_content_v2.get(&seq)
    }

    fn prune_local_summaries(&self) -> SuiResult {
        if let Some((last_local_summary, _)) = self
            .tables
            .locally_computed_checkpoints
            .reversed_safe_iter_with_bounds(None, None)?
            .next()
            .transpose()?
        {
            let mut batch = self.tables.locally_computed_checkpoints.batch();
            batch.schedule_delete_range(
                &self.tables.locally_computed_checkpoints,
                &0,
                &last_local_summary,
            )?;
            batch.write()?;
            info!("Pruned local summaries up to {:?}", last_local_summary);
        }
        Ok(())
    }

    pub fn clear_locally_computed_checkpoints_from(
        &self,
        from_seq: CheckpointSequenceNumber,
    ) -> SuiResult {
        let keys: Vec<_> = self
            .tables
            .locally_computed_checkpoints
            .safe_iter_with_bounds(Some(from_seq), None)
            .map(|r| r.map(|(k, _)| k))
            .collect::<Result<_, _>>()?;
        if let Some(&last_local_summary) = keys.last() {
            let mut batch = self.tables.locally_computed_checkpoints.batch();
            batch
                .delete_batch(&self.tables.locally_computed_checkpoints, keys.iter())
                .expect("Failed to delete locally computed checkpoints");
            batch
                .write()
                .expect("Failed to delete locally computed checkpoints");
            warn!(
                from_seq,
                last_local_summary,
                "Cleared locally_computed_checkpoints from {} (inclusive) through {} (inclusive)",
                from_seq,
                last_local_summary
            );
        }
        Ok(())
    }

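    /// Compares a locally computed checkpoint summary against the certified
    /// checkpoint for the same sequence number; if they diverge, the fork is
    /// recorded in the store and the node is halted via `fatal!`.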
    fn check_for_checkpoint_fork(
        &self,
        local_checkpoint: &CheckpointSummary,
        verified_checkpoint: &VerifiedCheckpoint,
    ) {
        if local_checkpoint != verified_checkpoint.data() {
            let verified_contents = self
                .get_checkpoint_contents(&verified_checkpoint.content_digest)
                .map(|opt_contents| {
                    opt_contents
                        .map(|contents| format!("{:?}", contents))
                        .unwrap_or_else(|| {
                            format!(
                                "Verified checkpoint contents not found, digest: {:?}",
                                verified_checkpoint.content_digest,
                            )
                        })
                })
                .map_err(|e| {
                    format!(
                        "Failed to get verified checkpoint contents, digest: {:?} error: {:?}",
                        verified_checkpoint.content_digest, e
                    )
                })
                .unwrap_or_else(|err_msg| err_msg);

            let local_contents = self
                .get_checkpoint_contents(&local_checkpoint.content_digest)
                .map(|opt_contents| {
                    opt_contents
                        .map(|contents| format!("{:?}", contents))
                        .unwrap_or_else(|| {
                            format!(
                                "Local checkpoint contents not found, digest: {:?}",
                                local_checkpoint.content_digest
                            )
                        })
                })
                .map_err(|e| {
                    format!(
                        "Failed to get local checkpoint contents, digest: {:?} error: {:?}",
                        local_checkpoint.content_digest, e
                    )
                })
                .unwrap_or_else(|err_msg| err_msg);

            error!(
                verified_checkpoint = ?verified_checkpoint.data(),
                ?verified_contents,
                ?local_checkpoint,
                ?local_contents,
                "Local checkpoint fork detected!",
            );

            if let Err(e) = self.record_checkpoint_fork_detected(
                *local_checkpoint.sequence_number(),
                local_checkpoint.digest(),
            ) {
                error!("Failed to record checkpoint fork in database: {:?}", e);
            }

            fail_point_arg!(
                "kill_checkpoint_fork_node",
                |checkpoint_overrides: std::sync::Arc<
                    std::sync::Mutex<std::collections::BTreeMap<u64, String>>,
                >| {
                    #[cfg(msim)]
                    {
                        if let Ok(mut overrides) = checkpoint_overrides.lock() {
                            overrides.insert(
                                local_checkpoint.sequence_number,
                                verified_checkpoint.digest().to_string(),
                            );
                        }
                        tracing::error!(
                            fatal = true,
                            "Fork recovery test: killing node due to checkpoint fork for sequence number: {}, using verified digest: {}",
                            local_checkpoint.sequence_number(),
                            verified_checkpoint.digest()
                        );
                        sui_simulator::task::shutdown_current_node();
                    }
                }
            );

            fatal!(
                "Local checkpoint fork detected for sequence number: {}",
                local_checkpoint.sequence_number()
            );
        }
    }

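    /// Inserts a certified checkpoint without bumping the HighestVerified
    /// watermark, and cross-checks it against any locally computed summary
    /// for the same sequence number.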
    pub fn insert_certified_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        debug!(
            checkpoint_seq = checkpoint.sequence_number(),
            "Inserting certified checkpoint",
        );
        let mut batch = self.tables.certified_checkpoints.batch();
        batch
            .insert_batch(
                &self.tables.certified_checkpoints,
                [(checkpoint.sequence_number(), checkpoint.serializable_ref())],
            )?
            .insert_batch(
                &self.tables.checkpoint_by_digest,
                [(checkpoint.digest(), checkpoint.serializable_ref())],
            )?;
        if checkpoint.next_epoch_committee().is_some() {
            batch.insert_batch(
                &self.tables.epoch_last_checkpoint_map,
                [(&checkpoint.epoch(), checkpoint.sequence_number())],
            )?;
        }
        batch.write()?;

        if let Some(local_checkpoint) = self
            .tables
            .locally_computed_checkpoints
            .get(checkpoint.sequence_number())?
        {
            self.check_for_checkpoint_fork(&local_checkpoint, checkpoint);
        }

        Ok(())
    }

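    /// Inserts a checkpoint that has been fully verified (e.g. by state sync):
    /// stores it as certified and also advances the HighestVerified watermark.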
    #[instrument(level = "debug", skip_all)]
    pub fn insert_verified_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        self.insert_certified_checkpoint(checkpoint)?;
        self.update_highest_verified_checkpoint(checkpoint)
    }

    pub fn update_highest_verified_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        if Some(*checkpoint.sequence_number())
            > self
                .get_highest_verified_checkpoint()?
                .map(|x| *x.sequence_number())
        {
            debug!(
                checkpoint_seq = checkpoint.sequence_number(),
                "Updating highest verified checkpoint",
            );
            self.tables.watermarks.insert(
                &CheckpointWatermark::HighestVerified,
                &(*checkpoint.sequence_number(), *checkpoint.digest()),
            )?;
        }

        Ok(())
    }

    pub fn update_highest_synced_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        let seq = *checkpoint.sequence_number();
        debug!(checkpoint_seq = seq, "Updating highest synced checkpoint");
        self.tables.watermarks.insert(
            &CheckpointWatermark::HighestSynced,
            &(seq, *checkpoint.digest()),
        )?;
        self.synced_checkpoint_notify_read.notify(&seq, checkpoint);
        Ok(())
    }

    async fn notify_read_checkpoint_watermark<F>(
        &self,
        notify_read: &NotifyRead<CheckpointSequenceNumber, VerifiedCheckpoint>,
        seq: CheckpointSequenceNumber,
        get_watermark: F,
    ) -> VerifiedCheckpoint
    where
        F: Fn() -> Option<CheckpointSequenceNumber>,
    {
        notify_read
            .read("notify_read_checkpoint_watermark", &[seq], |seqs| {
                let seq = seqs[0];
                let Some(highest) = get_watermark() else {
                    return vec![None];
                };
                if highest < seq {
                    return vec![None];
                }
                let checkpoint = self
                    .get_checkpoint_by_sequence_number(seq)
                    .expect("db error")
                    .expect("checkpoint not found");
                vec![Some(checkpoint)]
            })
            .await
            .into_iter()
            .next()
            .unwrap()
    }

    pub async fn notify_read_synced_checkpoint(
        &self,
        seq: CheckpointSequenceNumber,
    ) -> VerifiedCheckpoint {
        self.notify_read_checkpoint_watermark(&self.synced_checkpoint_notify_read, seq, || {
            self.get_highest_synced_checkpoint_seq_number()
                .expect("db error")
        })
        .await
    }

    pub async fn notify_read_executed_checkpoint(
        &self,
        seq: CheckpointSequenceNumber,
    ) -> VerifiedCheckpoint {
        self.notify_read_checkpoint_watermark(&self.executed_checkpoint_notify_read, seq, || {
            self.get_highest_executed_checkpoint_seq_number()
                .expect("db error")
        })
        .await
    }

    pub fn update_highest_executed_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        if let Some(seq_number) = self.get_highest_executed_checkpoint_seq_number()? {
            if seq_number >= *checkpoint.sequence_number() {
                return Ok(());
            }
            assert_eq!(
                seq_number + 1,
                *checkpoint.sequence_number(),
                "Cannot update highest executed checkpoint to {} when current highest executed checkpoint is {}",
                checkpoint.sequence_number(),
                seq_number
            );
        }
        let seq = *checkpoint.sequence_number();
        debug!(checkpoint_seq = seq, "Updating highest executed checkpoint");
        self.tables.watermarks.insert(
            &CheckpointWatermark::HighestExecuted,
            &(seq, *checkpoint.digest()),
        )?;
        self.executed_checkpoint_notify_read
            .notify(&seq, checkpoint);
        Ok(())
    }

    pub fn update_highest_pruned_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        self.tables.watermarks.insert(
            &CheckpointWatermark::HighestPruned,
            &(*checkpoint.sequence_number(), *checkpoint.digest()),
        )
    }

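    /// Sets the HighestExecuted watermark directly, skipping the sequential
    /// increment check performed by `update_highest_executed_checkpoint`.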
    pub fn set_highest_executed_checkpoint_subtle(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        self.tables.watermarks.insert(
            &CheckpointWatermark::HighestExecuted,
            &(*checkpoint.sequence_number(), *checkpoint.digest()),
        )
    }

    pub fn insert_checkpoint_contents(
        &self,
        contents: CheckpointContents,
    ) -> Result<(), TypedStoreError> {
        debug!(
            checkpoint_seq = ?contents.digest(),
            "Inserting checkpoint contents",
        );
        self.tables
            .checkpoint_content
            .insert(contents.digest(), &contents)
    }

    pub fn insert_verified_checkpoint_contents(
        &self,
        checkpoint: &VerifiedCheckpoint,
        full_contents: VerifiedCheckpointContents,
    ) -> Result<(), TypedStoreError> {
        let mut batch = self.tables.full_checkpoint_content_v2.batch();
        batch.insert_batch(
            &self.tables.checkpoint_sequence_by_contents_digest,
            [(&checkpoint.content_digest, checkpoint.sequence_number())],
        )?;
        let full_contents = full_contents.into_inner();
        batch.insert_batch(
            &self.tables.full_checkpoint_content_v2,
            [(checkpoint.sequence_number(), &full_contents)],
        )?;

        let contents = full_contents.into_checkpoint_contents();
        assert_eq!(&checkpoint.content_digest, contents.digest());

        batch.insert_batch(
            &self.tables.checkpoint_content,
            [(contents.digest(), &contents)],
        )?;

        batch.write()
    }

    pub fn delete_full_checkpoint_contents(
        &self,
        seq: CheckpointSequenceNumber,
    ) -> Result<(), TypedStoreError> {
        self.tables.full_checkpoint_content.remove(&seq)?;
        self.tables.full_checkpoint_content_v2.remove(&seq)
    }

    pub fn get_epoch_last_checkpoint(
        &self,
        epoch_id: EpochId,
    ) -> SuiResult<Option<VerifiedCheckpoint>> {
        let seq = self.get_epoch_last_checkpoint_seq_number(epoch_id)?;
        let checkpoint = match seq {
            Some(seq) => self.get_checkpoint_by_sequence_number(seq)?,
            None => None,
        };
        Ok(checkpoint)
    }

    pub fn get_epoch_last_checkpoint_seq_number(
        &self,
        epoch_id: EpochId,
    ) -> SuiResult<Option<CheckpointSequenceNumber>> {
        let seq = self.tables.epoch_last_checkpoint_map.get(&epoch_id)?;
        Ok(seq)
    }

    pub fn insert_epoch_last_checkpoint(
        &self,
        epoch_id: EpochId,
        checkpoint: &VerifiedCheckpoint,
    ) -> SuiResult {
        self.tables
            .epoch_last_checkpoint_map
            .insert(&epoch_id, checkpoint.sequence_number())?;
        Ok(())
    }

    pub fn get_epoch_state_commitments(
        &self,
        epoch: EpochId,
    ) -> SuiResult<Option<Vec<CheckpointCommitment>>> {
        let commitments = self.get_epoch_last_checkpoint(epoch)?.map(|checkpoint| {
            checkpoint
                .end_of_epoch_data
                .as_ref()
                .expect("Last checkpoint of epoch expected to have EndOfEpochData")
                .epoch_commitments
                .clone()
        });
        Ok(commitments)
    }

    pub fn get_epoch_stats(
        &self,
        epoch: EpochId,
        last_checkpoint: &CheckpointSummary,
    ) -> Option<EpochStats> {
        let (first_checkpoint, prev_epoch_network_transactions) = if epoch == 0 {
            (0, 0)
        } else if let Ok(Some(checkpoint)) = self.get_epoch_last_checkpoint(epoch - 1) {
            (
                checkpoint.sequence_number + 1,
                checkpoint.network_total_transactions,
            )
        } else {
            return None;
        };
        Some(EpochStats {
            checkpoint_count: last_checkpoint.sequence_number - first_checkpoint + 1,
            transaction_count: last_checkpoint.network_total_transactions
                - prev_epoch_network_transactions,
            total_gas_reward: last_checkpoint
                .epoch_rolling_gas_cost_summary
                .computation_cost,
        })
    }

    pub fn checkpoint_db(&self, path: &Path) -> SuiResult {
        self.tables
            .checkpoint_content
            .checkpoint_db(path)
            .map_err(Into::into)
    }

    pub fn delete_highest_executed_checkpoint_test_only(&self) -> Result<(), TypedStoreError> {
        let mut wb = self.tables.watermarks.batch();
        wb.delete_batch(
            &self.tables.watermarks,
            std::iter::once(CheckpointWatermark::HighestExecuted),
        )?;
        wb.write()?;
        Ok(())
    }

    pub fn reset_db_for_execution_since_genesis(&self) -> SuiResult {
        self.delete_highest_executed_checkpoint_test_only()?;
        Ok(())
    }

    pub fn record_checkpoint_fork_detected(
        &self,
        checkpoint_seq: CheckpointSequenceNumber,
        checkpoint_digest: CheckpointDigest,
    ) -> Result<(), TypedStoreError> {
        info!(
            checkpoint_seq = checkpoint_seq,
            checkpoint_digest = ?checkpoint_digest,
            "Recording checkpoint fork detection in database"
        );
        self.tables.watermarks.insert(
            &CheckpointWatermark::CheckpointForkDetected,
            &(checkpoint_seq, checkpoint_digest),
        )
    }

    pub fn get_checkpoint_fork_detected(
        &self,
    ) -> Result<Option<(CheckpointSequenceNumber, CheckpointDigest)>, TypedStoreError> {
        self.tables
            .watermarks
            .get(&CheckpointWatermark::CheckpointForkDetected)
    }

    pub fn clear_checkpoint_fork_detected(&self) -> Result<(), TypedStoreError> {
        self.tables
            .watermarks
            .remove(&CheckpointWatermark::CheckpointForkDetected)
    }

    pub fn record_transaction_fork_detected(
        &self,
        tx_digest: TransactionDigest,
        expected_effects_digest: TransactionEffectsDigest,
        actual_effects_digest: TransactionEffectsDigest,
    ) -> Result<(), TypedStoreError> {
        info!(
            tx_digest = ?tx_digest,
            expected_effects_digest = ?expected_effects_digest,
            actual_effects_digest = ?actual_effects_digest,
            "Recording transaction fork detection in database"
        );
        self.tables.transaction_fork_detected.insert(
            &TRANSACTION_FORK_DETECTED_KEY,
            &(tx_digest, expected_effects_digest, actual_effects_digest),
        )
    }

    pub fn get_transaction_fork_detected(
        &self,
    ) -> Result<
        Option<(
            TransactionDigest,
            TransactionEffectsDigest,
            TransactionEffectsDigest,
        )>,
        TypedStoreError,
    > {
        self.tables
            .transaction_fork_detected
            .get(&TRANSACTION_FORK_DETECTED_KEY)
    }

    pub fn clear_transaction_fork_detected(&self) -> Result<(), TypedStoreError> {
        self.tables
            .transaction_fork_detected
            .remove(&TRANSACTION_FORK_DETECTED_KEY)
    }
}

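/// Keys of the `watermarks` table: the highest checkpoint seen at each stage
/// of the pipeline, plus a persistent checkpoint-fork marker.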
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
pub enum CheckpointWatermark {
    HighestVerified,
    HighestSynced,
    HighestExecuted,
    HighestPruned,
    CheckpointForkDetected,
}

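/// Receives per-checkpoint effects from the `CheckpointBuilder` and feeds
/// them into the `GlobalStateHasher` for state accumulation.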
struct CheckpointStateHasher {
    epoch_store: Arc<AuthorityPerEpochStore>,
    hasher: Weak<GlobalStateHasher>,
    receive_from_builder: mpsc::Receiver<(CheckpointSequenceNumber, Vec<TransactionEffects>)>,
}

impl CheckpointStateHasher {
    fn new(
        epoch_store: Arc<AuthorityPerEpochStore>,
        hasher: Weak<GlobalStateHasher>,
        receive_from_builder: mpsc::Receiver<(CheckpointSequenceNumber, Vec<TransactionEffects>)>,
    ) -> Self {
        Self {
            epoch_store,
            hasher,
            receive_from_builder,
        }
    }

    async fn run(self) {
        let Self {
            epoch_store,
            hasher,
            mut receive_from_builder,
        } = self;
        while let Some((seq, effects)) = receive_from_builder.recv().await {
            let Some(hasher) = hasher.upgrade() else {
                info!("Object state hasher was dropped, stopping checkpoint accumulation");
                break;
            };
            hasher
                .accumulate_checkpoint(&effects, seq, &epoch_store)
                .expect("epoch ended while accumulating checkpoint");
        }
    }
}

#[derive(Debug)]
pub enum CheckpointBuilderError {
    ChangeEpochTxAlreadyExecuted,
    SystemPackagesMissing,
    Retry(anyhow::Error),
}

impl<SuiError: std::error::Error + Send + Sync + 'static> From<SuiError>
    for CheckpointBuilderError
{
    fn from(e: SuiError) -> Self {
        Self::Retry(e.into())
    }
}

pub type CheckpointBuilderResult<T = ()> = Result<T, CheckpointBuilderError>;

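/// Builds checkpoints from the pending checkpoints emitted by consensus:
/// resolves root transactions to their effects, orders them, writes the
/// locally computed checkpoint, and notifies the aggregator.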
pub struct CheckpointBuilder {
    state: Arc<AuthorityState>,
    store: Arc<CheckpointStore>,
    epoch_store: Arc<AuthorityPerEpochStore>,
    notify: Arc<Notify>,
    notify_aggregator: Arc<Notify>,
    last_built: watch::Sender<CheckpointSequenceNumber>,
    effects_store: Arc<dyn TransactionCacheRead>,
    global_state_hasher: Weak<GlobalStateHasher>,
    send_to_hasher: mpsc::Sender<(CheckpointSequenceNumber, Vec<TransactionEffects>)>,
    output: Box<dyn CheckpointOutput>,
    metrics: Arc<CheckpointMetrics>,
    max_transactions_per_checkpoint: usize,
    max_checkpoint_size_bytes: usize,
}

pub struct CheckpointAggregator {
    store: Arc<CheckpointStore>,
    epoch_store: Arc<AuthorityPerEpochStore>,
    notify: Arc<Notify>,
    current: Option<CheckpointSignatureAggregator>,
    output: Box<dyn CertifiedCheckpointOutput>,
    state: Arc<AuthorityState>,
    metrics: Arc<CheckpointMetrics>,
}

pub struct CheckpointSignatureAggregator {
    next_index: u64,
    summary: CheckpointSummary,
    digest: CheckpointDigest,
    signatures_by_digest: MultiStakeAggregator<CheckpointDigest, CheckpointSummary, true>,
    store: Arc<CheckpointStore>,
    state: Arc<AuthorityState>,
    metrics: Arc<CheckpointMetrics>,
}

impl CheckpointBuilder {
    fn new(
        state: Arc<AuthorityState>,
        store: Arc<CheckpointStore>,
        epoch_store: Arc<AuthorityPerEpochStore>,
        notify: Arc<Notify>,
        effects_store: Arc<dyn TransactionCacheRead>,
        global_state_hasher: Weak<GlobalStateHasher>,
        send_to_hasher: mpsc::Sender<(CheckpointSequenceNumber, Vec<TransactionEffects>)>,
        output: Box<dyn CheckpointOutput>,
        notify_aggregator: Arc<Notify>,
        last_built: watch::Sender<CheckpointSequenceNumber>,
        metrics: Arc<CheckpointMetrics>,
        max_transactions_per_checkpoint: usize,
        max_checkpoint_size_bytes: usize,
    ) -> Self {
        Self {
            state,
            store,
            epoch_store,
            notify,
            effects_store,
            global_state_hasher,
            send_to_hasher,
            output,
            notify_aggregator,
            last_built,
            metrics,
            max_transactions_per_checkpoint,
            max_checkpoint_size_bytes,
        }
    }

    async fn run(mut self, consensus_replay_waiter: Option<ReplayWaiter>) {
        if let Some(replay_waiter) = consensus_replay_waiter {
            info!("Waiting for consensus commits to replay ...");
            replay_waiter.wait_for_replay().await;
            info!("Consensus commits finished replaying");
        }
        info!("Starting CheckpointBuilder");
        loop {
            match self.maybe_build_checkpoints().await {
                Ok(()) => {}
                err @ Err(
                    CheckpointBuilderError::ChangeEpochTxAlreadyExecuted
                    | CheckpointBuilderError::SystemPackagesMissing,
                ) => {
                    info!("CheckpointBuilder stopping: {:?}", err);
                    return;
                }
                Err(CheckpointBuilderError::Retry(inner)) => {
                    let msg = format!("{:?}", inner);
                    debug_fatal!("Error while making checkpoint, will retry in 1s: {}", msg);
                    tokio::time::sleep(Duration::from_secs(1)).await;
                    self.metrics.checkpoint_errors.inc();
                    continue;
                }
            }

            self.notify.notified().await;
        }
    }

    async fn maybe_build_checkpoints(&mut self) -> CheckpointBuilderResult {
        if self
            .epoch_store
            .protocol_config()
            .split_checkpoints_in_consensus_handler()
        {
            self.maybe_build_checkpoints_v2().await
        } else {
            self.maybe_build_checkpoints_v1().await
        }
    }

    async fn maybe_build_checkpoints_v1(&mut self) -> CheckpointBuilderResult {
        let _scope = monitored_scope("BuildCheckpoints");

        let summary = self
            .epoch_store
            .last_built_checkpoint_builder_summary()
            .expect("epoch should not have ended");
        let mut last_height = summary.clone().and_then(|s| s.checkpoint_height);
        let mut last_timestamp = summary.map(|s| s.summary.timestamp_ms);

        let min_checkpoint_interval_ms = self
            .epoch_store
            .protocol_config()
            .min_checkpoint_interval_ms_as_option()
            .unwrap_or_default();
        let mut grouped_pending_checkpoints = Vec::new();
        let mut checkpoints_iter = self
            .epoch_store
            .get_pending_checkpoints(last_height)
            .expect("unexpected epoch store error")
            .into_iter()
            .peekable();
        while let Some((height, pending)) = checkpoints_iter.next() {
            let current_timestamp = pending.details().timestamp_ms;
            let can_build = match last_timestamp {
                Some(last_timestamp) => {
                    current_timestamp >= last_timestamp + min_checkpoint_interval_ms
                }
                None => true,
            } || checkpoints_iter
                .peek()
                .is_some_and(|(_, next_pending)| next_pending.details().last_of_epoch)
                || pending.details().last_of_epoch;
            grouped_pending_checkpoints.push(pending);
            if !can_build {
                debug!(
                    checkpoint_commit_height = height,
                    ?last_timestamp,
                    ?current_timestamp,
                    "waiting for more PendingCheckpoints: minimum interval not yet elapsed"
                );
                continue;
            }

            last_height = Some(height);
            last_timestamp = Some(current_timestamp);
            debug!(
                checkpoint_commit_height_from = grouped_pending_checkpoints
                    .first()
                    .unwrap()
                    .details()
                    .checkpoint_height,
                checkpoint_commit_height_to = last_height,
                "Making checkpoint with commit height range"
            );

            let seq = self
                .make_checkpoint(std::mem::take(&mut grouped_pending_checkpoints))
                .await?;

            self.last_built.send_if_modified(|cur| {
                if seq > *cur {
                    *cur = seq;
                    true
                } else {
                    false
                }
            });

            tokio::task::yield_now().await;
        }
        debug!(
            "Waiting for more checkpoints from consensus after processing {last_height:?}; {} pending checkpoints left unprocessed until next interval",
            grouped_pending_checkpoints.len(),
        );

        Ok(())
    }

    async fn maybe_build_checkpoints_v2(&mut self) -> CheckpointBuilderResult {
        let _scope = monitored_scope("BuildCheckpoints");

        let last_height = self
            .epoch_store
            .last_built_checkpoint_builder_summary()
            .expect("epoch should not have ended")
            .and_then(|s| s.checkpoint_height);

        for (height, pending) in self
            .epoch_store
            .get_pending_checkpoints_v2(last_height)
            .expect("unexpected epoch store error")
        {
            info!(checkpoint_commit_height = height, "Making checkpoint");

            let seq = self.make_checkpoint_v2(pending).await?;

            self.last_built.send_if_modified(|cur| {
                if seq > *cur {
                    *cur = seq;
                    true
                } else {
                    false
                }
            });

            tokio::task::yield_now().await;
        }

        Ok(())
    }

    #[instrument(level = "debug", skip_all, fields(last_height = pendings.last().unwrap().details().checkpoint_height))]
    async fn make_checkpoint(
        &mut self,
        pendings: Vec<PendingCheckpoint>,
    ) -> CheckpointBuilderResult<CheckpointSequenceNumber> {
        let _scope = monitored_scope("CheckpointBuilder::make_checkpoint");

        let pending_ckpt_str = pendings
            .iter()
            .map(|p| {
                format!(
                    "height={}, commit={}",
                    p.details().checkpoint_height,
                    p.details().consensus_commit_ref
                )
            })
            .join("; ");

        let last_details = pendings.last().unwrap().details().clone();

        let highest_executed_sequence = self
            .store
            .get_highest_executed_checkpoint_seq_number()
            .expect("db error")
            .unwrap_or(0);

        let (poll_count, result) = poll_count(self.resolve_checkpoint_transactions(pendings)).await;
        let (sorted_tx_effects_included_in_checkpoint, all_roots) = result?;

        let new_checkpoints = self
            .create_checkpoints(
                sorted_tx_effects_included_in_checkpoint,
                &last_details,
                &all_roots,
            )
            .await?;
        let highest_sequence = *new_checkpoints.last().0.sequence_number();
        if highest_sequence <= highest_executed_sequence && poll_count > 1 {
            debug_fatal!(
                "resolve_checkpoint_transactions should be instantaneous when executed checkpoint is ahead of checkpoint builder"
            );
        }

        let new_ckpt_str = new_checkpoints
            .iter()
            .map(|(ckpt, _)| format!("seq={}, digest={}", ckpt.sequence_number(), ckpt.digest()))
            .join("; ");

        self.write_checkpoints(last_details.checkpoint_height, new_checkpoints)
            .await?;
        info!(
            "Made new checkpoint {} from pending checkpoint {}",
            new_ckpt_str, pending_ckpt_str
        );

        Ok(highest_sequence)
    }

    #[instrument(level = "debug", skip_all, fields(height = pending.details.checkpoint_height))]
    async fn make_checkpoint_v2(
        &mut self,
        pending: PendingCheckpointV2,
    ) -> CheckpointBuilderResult<CheckpointSequenceNumber> {
        let _scope = monitored_scope("CheckpointBuilder::make_checkpoint");

        let details = pending.details.clone();

        let highest_executed_sequence = self
            .store
            .get_highest_executed_checkpoint_seq_number()
            .expect("db error")
            .unwrap_or(0);

        let (poll_count, result) =
            poll_count(self.resolve_checkpoint_transactions_v2(pending)).await;
        let (sorted_tx_effects_included_in_checkpoint, all_roots) = result?;

        let new_checkpoints = self
            .create_checkpoints(
                sorted_tx_effects_included_in_checkpoint,
                &details,
                &all_roots,
            )
            .await?;
        assert_eq!(new_checkpoints.len(), 1, "Expected exactly one checkpoint");
        let sequence = *new_checkpoints.first().0.sequence_number();
        let digest = new_checkpoints.first().0.digest();
        if sequence <= highest_executed_sequence && poll_count > 1 {
            debug_fatal!(
                "resolve_checkpoint_transactions should be instantaneous when executed checkpoint is ahead of checkpoint builder"
            );
        }

        self.write_checkpoints(details.checkpoint_height, new_checkpoints)
            .await?;
        info!(
            seq = sequence,
            %digest,
            height = details.checkpoint_height,
            commit = %details.consensus_commit_ref,
            "Made new checkpoint"
        );

        Ok(sequence)
    }

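    /// Builds the accumulator settlement transactions (plus a barrier
    /// transaction) for a checkpoint, schedules them for execution, waits for
    /// their effects, and returns the settlement `TransactionKey` together
    /// with all resulting effects so they can be appended to the checkpoint.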
    async fn construct_and_execute_settlement_transactions(
        &self,
        sorted_tx_effects_included_in_checkpoint: &[TransactionEffects],
        checkpoint_height: CheckpointHeight,
        checkpoint_seq: CheckpointSequenceNumber,
        tx_index_offset: u64,
    ) -> (TransactionKey, Vec<TransactionEffects>) {
        let _scope =
            monitored_scope("CheckpointBuilder::construct_and_execute_settlement_transactions");

        let tx_key =
            TransactionKey::AccumulatorSettlement(self.epoch_store.epoch(), checkpoint_height);

        let epoch = self.epoch_store.epoch();
        let accumulator_root_obj_initial_shared_version = self
            .epoch_store
            .epoch_start_config()
            .accumulator_root_obj_initial_shared_version()
            .expect("accumulator root object must exist");

        let builder = AccumulatorSettlementTxBuilder::new(
            Some(self.effects_store.as_ref()),
            sorted_tx_effects_included_in_checkpoint,
            checkpoint_seq,
            tx_index_offset,
        );

        let funds_changes = builder.collect_funds_changes();
        let num_updates = builder.num_updates();
        let settlement_txns = builder.build_tx(
            self.epoch_store.protocol_config(),
            epoch,
            accumulator_root_obj_initial_shared_version,
            checkpoint_height,
            checkpoint_seq,
        );

        let settlement_txns: Vec<_> = settlement_txns
            .into_iter()
            .map(|tx| {
                VerifiedExecutableTransaction::new_system(
                    VerifiedTransaction::new_system_transaction(tx),
                    self.epoch_store.epoch(),
                )
            })
            .collect();

        let settlement_digests: Vec<_> = settlement_txns.iter().map(|tx| *tx.digest()).collect();

        debug!(
            ?settlement_digests,
            ?tx_key,
            "created settlement transactions with {num_updates} updates"
        );

        self.epoch_store
            .notify_settlement_transactions_ready(tx_key, settlement_txns);

        let settlement_effects = wait_for_effects_with_retry(
            self.effects_store.as_ref(),
            "CheckpointBuilder::notify_read_settlement_effects",
            &settlement_digests,
            tx_key,
        )
        .await;

        let barrier_tx = accumulators::build_accumulator_barrier_tx(
            epoch,
            accumulator_root_obj_initial_shared_version,
            checkpoint_height,
            &settlement_effects,
        );

        let barrier_tx = VerifiedExecutableTransaction::new_system(
            VerifiedTransaction::new_system_transaction(barrier_tx),
            self.epoch_store.epoch(),
        );
        let barrier_digest = *barrier_tx.digest();

        self.epoch_store
            .notify_barrier_transaction_ready(tx_key, barrier_tx);

        let barrier_effects = wait_for_effects_with_retry(
            self.effects_store.as_ref(),
            "CheckpointBuilder::notify_read_barrier_effects",
            &[barrier_digest],
            tx_key,
        )
        .await;

        let settlement_effects: Vec<_> = settlement_effects
            .into_iter()
            .chain(barrier_effects)
            .collect();

        let mut next_accumulator_version = None;
        for fx in settlement_effects.iter() {
            assert!(
                fx.status().is_ok(),
                "settlement transaction cannot fail (digest: {:?}) {:#?}",
                fx.transaction_digest(),
                fx
            );
            if let Some(version) = fx
                .mutated()
                .iter()
                .find_map(|(oref, _)| (oref.0 == SUI_ACCUMULATOR_ROOT_OBJECT_ID).then_some(oref.1))
            {
                assert!(
                    next_accumulator_version.is_none(),
                    "Only one settlement transaction should mutate the accumulator root object"
                );
                next_accumulator_version = Some(version);
            }
        }
        let settlements = FundsSettlement {
            next_accumulator_version: next_accumulator_version
                .expect("Accumulator root object should be mutated in the settlement transactions"),
            funds_changes,
        };

        self.state
            .execution_scheduler()
            .settle_address_funds(settlements);

        (tx_key, settlement_effects)
    }

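    /// Resolves the root transactions of the given pending checkpoints into
    /// the complete, causally ordered list of effects to include in the next
    /// checkpoint, prepending the consensus commit prologue and appending
    /// settlement effects when accumulators are enabled.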
    #[instrument(level = "debug", skip_all)]
    async fn resolve_checkpoint_transactions(
        &self,
        pending_checkpoints: Vec<PendingCheckpoint>,
    ) -> SuiResult<(Vec<TransactionEffects>, HashSet<TransactionDigest>)> {
        let _scope = monitored_scope("CheckpointBuilder::resolve_checkpoint_transactions");

        let mut effects_in_current_checkpoint = BTreeSet::new();

        let mut tx_effects = Vec::new();
        let mut tx_roots = HashSet::new();

        for pending_checkpoint in pending_checkpoints.into_iter() {
            let mut pending = pending_checkpoint;
            debug!(
                checkpoint_commit_height = pending.details.checkpoint_height,
                "Resolving checkpoint transactions for pending checkpoint.",
            );

            trace!(
                "roots for pending checkpoint {:?}: {:?}",
                pending.details.checkpoint_height, pending.roots,
            );

            let settlement_root = if self.epoch_store.accumulators_enabled() {
                let Some(settlement_root @ TransactionKey::AccumulatorSettlement(..)) =
                    pending.roots.pop()
                else {
                    fatal!("No settlement root found");
                };
                Some(settlement_root)
            } else {
                None
            };

            let roots = &pending.roots;

            self.metrics
                .checkpoint_roots_count
                .inc_by(roots.len() as u64);

            let root_digests = self
                .epoch_store
                .notify_read_tx_key_to_digest(roots)
                .in_monitored_scope("CheckpointNotifyDigests")
                .await?;
            let root_effects = self
                .effects_store
                .notify_read_executed_effects(
                    CHECKPOINT_BUILDER_NOTIFY_READ_TASK_NAME,
                    &root_digests,
                )
                .in_monitored_scope("CheckpointNotifyRead")
                .await;

            assert!(
                self.epoch_store
                    .protocol_config()
                    .prepend_prologue_tx_in_consensus_commit_in_checkpoints()
            );

            let consensus_commit_prologue =
                self.extract_consensus_commit_prologue(&root_digests, &root_effects)?;

            if let Some((ccp_digest, ccp_effects)) = &consensus_commit_prologue {
                let unsorted_ccp = self.complete_checkpoint_effects(
                    vec![ccp_effects.clone()],
                    &mut effects_in_current_checkpoint,
                )?;

                if unsorted_ccp.len() != 1 {
                    fatal!(
                        "Expected 1 consensus commit prologue, got {:?}",
                        unsorted_ccp
                            .iter()
                            .map(|e| e.transaction_digest())
                            .collect::<Vec<_>>()
                    );
                }
                assert_eq!(unsorted_ccp.len(), 1);
                assert_eq!(unsorted_ccp[0].transaction_digest(), ccp_digest);
            }

            let unsorted =
                self.complete_checkpoint_effects(root_effects, &mut effects_in_current_checkpoint)?;

            let _scope = monitored_scope("CheckpointBuilder::causal_sort");
            let mut sorted: Vec<TransactionEffects> = Vec::with_capacity(unsorted.len() + 1);
            if let Some((ccp_digest, ccp_effects)) = consensus_commit_prologue {
                if cfg!(debug_assertions) {
                    for tx in unsorted.iter() {
                        assert!(tx.transaction_digest() != &ccp_digest);
                    }
                }
                sorted.push(ccp_effects);
            }
            sorted.extend(CausalOrder::causal_sort(unsorted));

            if let Some(settlement_root) = settlement_root {
                let last_checkpoint =
                    Self::load_last_built_checkpoint_summary(&self.epoch_store, &self.store)?;
                let next_checkpoint_seq = last_checkpoint
                    .as_ref()
                    .map(|(seq, _)| *seq)
                    .unwrap_or_default()
                    + 1;
                let tx_index_offset = tx_effects.len() as u64;

                let (tx_key, settlement_effects) = self
                    .construct_and_execute_settlement_transactions(
                        &sorted,
                        pending.details.checkpoint_height,
                        next_checkpoint_seq,
                        tx_index_offset,
                    )
                    .await;
                debug!(?tx_key, "executed settlement transactions");

                assert_eq!(settlement_root, tx_key);

                sorted.extend(settlement_effects);
            }

            #[cfg(msim)]
            {
                self.expensive_consensus_commit_prologue_invariants_check(&root_digests, &sorted);
            }

            tx_effects.extend(sorted);
            tx_roots.extend(root_digests);
        }

        Ok((tx_effects, tx_roots))
    }

1874 #[instrument(level = "debug", skip_all)]
1877 async fn resolve_checkpoint_transactions_v2(
1878 &self,
1879 pending: PendingCheckpointV2,
1880 ) -> SuiResult<(Vec<TransactionEffects>, HashSet<TransactionDigest>)> {
1881 let _scope = monitored_scope("CheckpointBuilder::resolve_checkpoint_transactions");
1882
1883 let mut effects_in_current_checkpoint = BTreeSet::new();
1884 debug!(
1885 checkpoint_commit_height = pending.details.checkpoint_height,
1886 "Resolving checkpoint transactions for pending checkpoint.",
1887 );
1888
1889 trace!(
1890 "roots for pending checkpoint {:?}: {:?}",
1891 pending.details.checkpoint_height, pending.roots,
1892 );
1893
1894 assert!(
1895 self.epoch_store
1896 .protocol_config()
1897 .prepend_prologue_tx_in_consensus_commit_in_checkpoints()
1898 );
1899
1900 let mut sorted: Vec<TransactionEffects> = Vec::new();
1901 let mut all_root_digests: Vec<TransactionDigest> = Vec::new();
1902
1903 let last_checkpoint =
1904 Self::load_last_built_checkpoint_summary(&self.epoch_store, &self.store)?;
1905 let next_checkpoint_seq = last_checkpoint
1906 .as_ref()
1907 .map(|(seq, _)| *seq)
1908 .unwrap_or_default()
1909 + 1;
1910
1911 for checkpoint_roots in &pending.roots {
1912 let tx_roots = &checkpoint_roots.tx_roots;
1913
1914 self.metrics
1915 .checkpoint_roots_count
1916 .inc_by(tx_roots.len() as u64);
1917
1918 let root_digests = self
1919 .epoch_store
1920 .notify_read_tx_key_to_digest(tx_roots)
1921 .in_monitored_scope("CheckpointNotifyDigests")
1922 .await?;
1923
1924 all_root_digests.extend(root_digests.iter().cloned());
1925
1926 let root_effects = self
1927 .effects_store
1928 .notify_read_executed_effects(
1929 CHECKPOINT_BUILDER_NOTIFY_READ_TASK_NAME,
1930 &root_digests,
1931 )
1932 .in_monitored_scope("CheckpointNotifyRead")
1933 .await;
1934
1935 let consensus_commit_prologue = {
1936 let ccp = self.extract_consensus_commit_prologue(&root_digests, &root_effects)?;
1937
1938 if let Some((ccp_digest, ccp_effects)) = &ccp {
1939 let unsorted_ccp = self.complete_checkpoint_effects(
1940 vec![ccp_effects.clone()],
1941 &mut effects_in_current_checkpoint,
1942 )?;
1943
1944 if unsorted_ccp.is_empty() {
1945 None
1949 } else if unsorted_ccp.len() != 1 {
1950 fatal!(
1951 "Expected 1 consensus commit prologue, got {:?}",
1952 unsorted_ccp
1953 .iter()
1954 .map(|e| e.transaction_digest())
1955 .collect::<Vec<_>>()
1956 );
1957 } else {
1958 assert_eq!(unsorted_ccp[0].transaction_digest(), ccp_digest);
1959 ccp.clone()
1960 }
1961 } else {
1962 None
1963 }
1964 };
1965
1966 let unsorted =
1967 self.complete_checkpoint_effects(root_effects, &mut effects_in_current_checkpoint)?;
1968
1969 let _scope = monitored_scope("CheckpointBuilder::causal_sort");
1970 if let Some((ccp_digest, ccp_effects)) = consensus_commit_prologue {
1971 if cfg!(debug_assertions) {
1972 for tx in unsorted.iter() {
1973 assert!(tx.transaction_digest() != &ccp_digest);
1974 }
1975 }
1976 sorted.push(ccp_effects);
1977 }
1978 sorted.extend(CausalOrder::causal_sort(unsorted));
1979
1980 if checkpoint_roots.settlement_root.is_some() {
1981 let (tx_key, settlement_effects) = self
1982 .construct_and_execute_settlement_transactions(
1983 &sorted,
1984 checkpoint_roots.height,
1985 next_checkpoint_seq,
 1986 0,
 )
1989 .await;
1990 debug!(?tx_key, "executed settlement transactions");
1991
1992 sorted.extend(settlement_effects);
1993 }
1994 }
1995
1996 #[cfg(msim)]
1997 {
1998 self.expensive_consensus_commit_prologue_invariants_check_v2(
1999 &all_root_digests,
2000 &sorted,
2001 );
2002 }
2003 Ok((sorted, all_root_digests.into_iter().collect()))
2004 }
2005
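// Returns the digest and effects of the consensus commit prologue, if the first root
// is one. The prologue, when present, is expected to be the first root of the commit.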
2006 fn extract_consensus_commit_prologue(
2009 &self,
2010 root_digests: &[TransactionDigest],
2011 root_effects: &[TransactionEffects],
2012 ) -> SuiResult<Option<(TransactionDigest, TransactionEffects)>> {
2013 let _scope = monitored_scope("CheckpointBuilder::extract_consensus_commit_prologue");
2014 if root_digests.is_empty() {
2015 return Ok(None);
2016 }
2017
2018 let first_tx = self
2022 .state
2023 .get_transaction_cache_reader()
2024 .get_transaction_block(&root_digests[0])
2025 .expect("Transaction block must exist");
2026
2027 Ok(first_tx
2028 .transaction_data()
2029 .is_consensus_commit_prologue()
2030 .then(|| {
2031 assert_eq!(first_tx.digest(), root_effects[0].transaction_digest());
2032 (*first_tx.digest(), root_effects[0].clone())
2033 }))
2034 }
2035
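// Persists newly built checkpoints: contents and locally computed summaries are
// written in a single batch, a previously built summary with a different digest is
// treated as fatal, outputs are notified, and each summary is compared against any
// already-certified checkpoint to detect forks.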
2036 #[instrument(level = "debug", skip_all)]
2037 async fn write_checkpoints(
2038 &mut self,
2039 height: CheckpointHeight,
2040 new_checkpoints: NonEmpty<(CheckpointSummary, CheckpointContents)>,
2041 ) -> SuiResult {
2042 let _scope = monitored_scope("CheckpointBuilder::write_checkpoints");
2043 let mut batch = self.store.tables.checkpoint_content.batch();
2044 let mut all_tx_digests =
2045 Vec::with_capacity(new_checkpoints.iter().map(|(_, c)| c.size()).sum());
2046
2047 for (summary, contents) in &new_checkpoints {
2048 debug!(
2049 checkpoint_commit_height = height,
2050 checkpoint_seq = summary.sequence_number,
2051 contents_digest = ?contents.digest(),
2052 "writing checkpoint",
2053 );
2054
2055 if let Some(previously_computed_summary) = self
2056 .store
2057 .tables
2058 .locally_computed_checkpoints
2059 .get(&summary.sequence_number)?
2060 && previously_computed_summary.digest() != summary.digest()
2061 {
2062 fatal!(
2063 "Checkpoint {} was previously built with a different result: previously_computed_summary {:?} vs current_summary {:?}",
2064 summary.sequence_number,
2065 previously_computed_summary.digest(),
2066 summary.digest()
2067 );
2068 }
2069
2070 all_tx_digests.extend(contents.iter().map(|digests| digests.transaction));
2071
2072 self.metrics
2073 .transactions_included_in_checkpoint
2074 .inc_by(contents.size() as u64);
2075 let sequence_number = summary.sequence_number;
2076 self.metrics
2077 .last_constructed_checkpoint
2078 .set(sequence_number as i64);
2079
2080 batch.insert_batch(
2081 &self.store.tables.checkpoint_content,
2082 [(contents.digest(), contents)],
2083 )?;
2084
2085 batch.insert_batch(
2086 &self.store.tables.locally_computed_checkpoints,
2087 [(sequence_number, summary)],
2088 )?;
2089 }
2090
2091 batch.write()?;
2092
2093 for (summary, contents) in &new_checkpoints {
2095 self.output
2096 .checkpoint_created(summary, contents, &self.epoch_store, &self.store)
2097 .await?;
2098 }
2099
2100 for (local_checkpoint, _) in &new_checkpoints {
2101 if let Some(certified_checkpoint) = self
2102 .store
2103 .tables
2104 .certified_checkpoints
2105 .get(local_checkpoint.sequence_number())?
2106 {
2107 self.store
2108 .check_for_checkpoint_fork(local_checkpoint, &certified_checkpoint.into());
2109 }
2110 }
2111
2112 self.notify_aggregator.notify_one();
2113 self.epoch_store
2114 .process_constructed_checkpoint(height, new_checkpoints);
2115 Ok(())
2116 }
2117
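// Splits (effects, signatures) pairs into checkpoint-sized chunks bounded by
// max_transactions_per_checkpoint and max_checkpoint_size_bytes; a single oversized
// transaction is let through with a warning. When split_checkpoints_in_consensus_handler
// is enabled, everything is returned as one chunk (presumably because splitting was
// already done upstream). Worked example with a limit of 3 transactions / 100 bytes
// and per-item sizes [40, 40, 40]: the third item would push the running size to
// 120 > 100, so the result is [[t1, t2], [t3]]. At least one chunk, possibly empty,
// is always returned, presumably so an empty checkpoint can still be produced.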
2118 #[allow(clippy::type_complexity)]
2119 fn split_checkpoint_chunks(
2120 &self,
2121 effects_and_transaction_sizes: Vec<(TransactionEffects, usize)>,
2122 signatures: Vec<Vec<(GenericSignature, Option<SequenceNumber>)>>,
2123 ) -> CheckpointBuilderResult<
2124 Vec<
2125 Vec<(
2126 TransactionEffects,
2127 Vec<(GenericSignature, Option<SequenceNumber>)>,
2128 )>,
2129 >,
2130 > {
2131 let _guard = monitored_scope("CheckpointBuilder::split_checkpoint_chunks");
2132
2133 if self
2135 .epoch_store
2136 .protocol_config()
2137 .split_checkpoints_in_consensus_handler()
2138 {
2139 let chunk: Vec<_> = effects_and_transaction_sizes
2140 .into_iter()
2141 .zip(signatures)
2142 .map(|((effects, _size), sigs)| (effects, sigs))
2143 .collect();
2144 return Ok(vec![chunk]);
2145 }
2146 let mut chunks = Vec::new();
2147 let mut chunk = Vec::new();
2148 let mut chunk_size: usize = 0;
2149 for ((effects, transaction_size), signatures) in effects_and_transaction_sizes
2150 .into_iter()
2151 .zip(signatures.into_iter())
2152 {
2153 let signatures_size = if self.epoch_store.protocol_config().address_aliases() {
2158 bcs::serialized_size(&signatures)?
2159 } else {
2160 let signatures: Vec<&GenericSignature> =
2161 signatures.iter().map(|(s, _)| s).collect();
2162 bcs::serialized_size(&signatures)?
2163 };
2164 let size = transaction_size + bcs::serialized_size(&effects)? + signatures_size;
2165 if chunk.len() == self.max_transactions_per_checkpoint
2166 || (chunk_size + size) > self.max_checkpoint_size_bytes
2167 {
2168 if chunk.is_empty() {
2169 warn!(
2171 "Size of single transaction ({size}) exceeds max checkpoint size ({}); allowing excessively large checkpoint to go through.",
2172 self.max_checkpoint_size_bytes
2173 );
2174 } else {
2175 chunks.push(chunk);
2176 chunk = Vec::new();
2177 chunk_size = 0;
2178 }
2179 }
2180
2181 chunk.push((effects, signatures));
2182 chunk_size += size;
2183 }
2184
2185 if !chunk.is_empty() || chunks.is_empty() {
2186 chunks.push(chunk);
2191 }
2196 Ok(chunks)
2197 }
2198
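// Loads the last checkpoint summary built in this epoch, falling back to the last
// certified checkpoint of the previous epoch when the builder DB is empty (and
// panicking if that is missing too); returns None only in epoch 0 before any
// checkpoint exists.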
2199 fn load_last_built_checkpoint_summary(
2200 epoch_store: &AuthorityPerEpochStore,
2201 store: &CheckpointStore,
2202 ) -> SuiResult<Option<(CheckpointSequenceNumber, CheckpointSummary)>> {
2203 let mut last_checkpoint = epoch_store.last_built_checkpoint_summary()?;
2204 if last_checkpoint.is_none() {
2205 let epoch = epoch_store.epoch();
2206 if epoch > 0 {
2207 let previous_epoch = epoch - 1;
2208 let last_verified = store.get_epoch_last_checkpoint(previous_epoch)?;
2209 last_checkpoint = last_verified.map(VerifiedCheckpoint::into_summary_and_sequence);
2210 if let Some((ref seq, _)) = last_checkpoint {
2211 debug!(
2212 "No checkpoints in builder DB, taking checkpoint from previous epoch with sequence {seq}"
2213 );
2214 } else {
2215 panic!("Can not find last checkpoint for previous epoch {previous_epoch}");
2217 }
2218 }
2219 }
2220 Ok(last_checkpoint)
2221 }
2222
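// Turns resolved effects into one or more checkpoints: loads the corresponding
// transactions, waits for externally submitted certificates (user programmable
// transactions not already among the roots) to be processed by consensus, collects
// user signatures, records randomness rounds, splits everything into chunks, and
// emits a CheckpointSummary + CheckpointContents per chunk. For the last checkpoint
// of the epoch it also executes the advance-epoch transaction and computes the root
// state digest / end-of-epoch data.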
2223 #[instrument(level = "debug", skip_all)]
2224 async fn create_checkpoints(
2225 &self,
2226 all_effects: Vec<TransactionEffects>,
2227 details: &PendingCheckpointInfo,
2228 all_roots: &HashSet<TransactionDigest>,
2229 ) -> CheckpointBuilderResult<NonEmpty<(CheckpointSummary, CheckpointContents)>> {
2230 let _scope = monitored_scope("CheckpointBuilder::create_checkpoints");
2231
2232 let total = all_effects.len();
2233 let mut last_checkpoint =
2234 Self::load_last_built_checkpoint_summary(&self.epoch_store, &self.store)?;
2235 let last_checkpoint_seq = last_checkpoint.as_ref().map(|(seq, _)| *seq);
2236 info!(
2237 checkpoint_commit_height = details.checkpoint_height,
2238 next_checkpoint_seq = last_checkpoint_seq.unwrap_or_default() + 1,
2239 checkpoint_timestamp = details.timestamp_ms,
2240 "Creating checkpoint(s) for {} transactions",
2241 all_effects.len(),
2242 );
2243
2244 let all_digests: Vec<_> = all_effects
2245 .iter()
2246 .map(|effect| *effect.transaction_digest())
2247 .collect();
2248 let transactions_and_sizes = self
2249 .state
2250 .get_transaction_cache_reader()
2251 .get_transactions_and_serialized_sizes(&all_digests)?;
2252 let mut all_effects_and_transaction_sizes = Vec::with_capacity(all_effects.len());
2253 let mut transactions = Vec::with_capacity(all_effects.len());
2254 let mut transaction_keys = Vec::with_capacity(all_effects.len());
2255 let mut randomness_rounds = BTreeMap::new();
2256 {
2257 let _guard = monitored_scope("CheckpointBuilder::wait_for_transactions_sequenced");
2258 debug!(
2259 ?last_checkpoint_seq,
2260 "Waiting for {:?} certificates to appear in consensus",
2261 all_effects.len()
2262 );
2263
2264 for (effects, transaction_and_size) in all_effects
2265 .into_iter()
2266 .zip(transactions_and_sizes.into_iter())
2267 {
2268 let (transaction, size) = transaction_and_size
2269 .unwrap_or_else(|| panic!("Could not find executed transaction {:?}", effects));
2270 match transaction.inner().transaction_data().kind() {
2271 TransactionKind::ConsensusCommitPrologue(_)
2272 | TransactionKind::ConsensusCommitPrologueV2(_)
2273 | TransactionKind::ConsensusCommitPrologueV3(_)
2274 | TransactionKind::ConsensusCommitPrologueV4(_)
2275 | TransactionKind::AuthenticatorStateUpdate(_) => {
2276 }
2279 TransactionKind::ProgrammableSystemTransaction(_) => {
2280 }
2282 TransactionKind::ChangeEpoch(_)
2283 | TransactionKind::Genesis(_)
2284 | TransactionKind::EndOfEpochTransaction(_) => {
2285 fatal!(
2286 "unexpected transaction in checkpoint effects: {:?}",
2287 transaction
2288 );
2289 }
2290 TransactionKind::RandomnessStateUpdate(rsu) => {
2291 randomness_rounds
2292 .insert(*effects.transaction_digest(), rsu.randomness_round);
2293 }
2294 TransactionKind::ProgrammableTransaction(_) => {
2295 let digest = *effects.transaction_digest();
2299 if !all_roots.contains(&digest) {
2300 transaction_keys.push(SequencedConsensusTransactionKey::External(
2301 ConsensusTransactionKey::Certificate(digest),
2302 ));
2303 }
2304 }
2305 }
2306 transactions.push(transaction);
2307 all_effects_and_transaction_sizes.push((effects, size));
2308 }
2309
2310 self.epoch_store
2311 .consensus_messages_processed_notify(transaction_keys)
2312 .await?;
2313 }
2314
2315 let signatures = self
2316 .epoch_store
2317 .user_signatures_for_checkpoint(&transactions, &all_digests);
2318 debug!(
2319 ?last_checkpoint_seq,
2320 "Received {} checkpoint user signatures from consensus",
2321 signatures.len()
2322 );
2323
2324 let mut end_of_epoch_observation_keys: Option<Vec<_>> = if details.last_of_epoch {
2325 Some(
2326 transactions
2327 .iter()
2328 .flat_map(|tx| {
2329 if let TransactionKind::ProgrammableTransaction(ptb) =
2330 tx.transaction_data().kind()
2331 {
2332 itertools::Either::Left(
2333 ptb.commands
2334 .iter()
2335 .map(ExecutionTimeObservationKey::from_command),
2336 )
2337 } else {
2338 itertools::Either::Right(std::iter::empty())
2339 }
2340 })
2341 .collect(),
2342 )
2343 } else {
2344 None
2345 };
2346
2347 let chunks = self.split_checkpoint_chunks(all_effects_and_transaction_sizes, signatures)?;
2348 let chunks_count = chunks.len();
2349
2350 let mut checkpoints = Vec::with_capacity(chunks_count);
2351 debug!(
2352 ?last_checkpoint_seq,
2353 "Creating {} checkpoints with {} transactions", chunks_count, total,
2354 );
2355
2356 let epoch = self.epoch_store.epoch();
2357 for (index, transactions) in chunks.into_iter().enumerate() {
2358 let first_checkpoint_of_epoch = index == 0
2359 && last_checkpoint
2360 .as_ref()
2361 .map(|(_, c)| c.epoch != epoch)
2362 .unwrap_or(true);
2363 if first_checkpoint_of_epoch {
2364 self.epoch_store
2365 .record_epoch_first_checkpoint_creation_time_metric();
2366 }
2367 let last_checkpoint_of_epoch = details.last_of_epoch && index == chunks_count - 1;
2368
2369 let sequence_number = last_checkpoint
2370 .as_ref()
2371 .map(|(_, c)| c.sequence_number + 1)
2372 .unwrap_or_default();
2373 let mut timestamp_ms = details.timestamp_ms;
2374 if let Some((_, last_checkpoint)) = &last_checkpoint
2375 && last_checkpoint.timestamp_ms > timestamp_ms
2376 {
2377 debug!(
2379 "Decrease of checkpoint timestamp, possibly due to epoch change. Sequence: {}, previous: {}, current: {}",
2380 sequence_number, last_checkpoint.timestamp_ms, timestamp_ms,
2381 );
2382 if self
2383 .epoch_store
2384 .protocol_config()
2385 .enforce_checkpoint_timestamp_monotonicity()
2386 {
2387 timestamp_ms = last_checkpoint.timestamp_ms;
2388 }
2389 }
2390
2391 let (mut effects, mut signatures): (Vec<_>, Vec<_>) = transactions.into_iter().unzip();
2392 let epoch_rolling_gas_cost_summary =
2393 self.get_epoch_total_gas_cost(last_checkpoint.as_ref().map(|(_, c)| c), &effects);
2394
2395 let end_of_epoch_data = if last_checkpoint_of_epoch {
2396 let system_state_obj = self
2397 .augment_epoch_last_checkpoint(
2398 &epoch_rolling_gas_cost_summary,
2399 timestamp_ms,
2400 &mut effects,
2401 &mut signatures,
2402 sequence_number,
2403 std::mem::take(&mut end_of_epoch_observation_keys).expect("end_of_epoch_observation_keys must be populated for the last checkpoint"),
2404 last_checkpoint_seq.unwrap_or_default(),
2405 )
2406 .await?;
2407
2408 let committee = system_state_obj
2409 .get_current_epoch_committee()
2410 .committee()
2411 .clone();
2412
2413 let root_state_digest = {
2416 let state_acc = self
2417 .global_state_hasher
2418 .upgrade()
2419 .expect("No checkpoints should be getting built after local configuration");
2420 let acc = state_acc.accumulate_checkpoint(
2421 &effects,
2422 sequence_number,
2423 &self.epoch_store,
2424 )?;
2425
2426 state_acc
2427 .wait_for_previous_running_root(&self.epoch_store, sequence_number)
2428 .await?;
2429
2430 state_acc.accumulate_running_root(
2431 &self.epoch_store,
2432 sequence_number,
2433 Some(acc),
2434 )?;
2435 state_acc
2436 .digest_epoch(self.epoch_store.clone(), sequence_number)
2437 .await?
2438 };
2439 self.metrics.highest_accumulated_epoch.set(epoch as i64);
2440 info!("Epoch {epoch} root state hash digest: {root_state_digest:?}");
2441
2442 let epoch_commitments = if self
2443 .epoch_store
2444 .protocol_config()
2445 .check_commit_root_state_digest_supported()
2446 {
2447 vec![root_state_digest.into()]
2448 } else {
2449 vec![]
2450 };
2451
2452 Some(EndOfEpochData {
2453 next_epoch_committee: committee.voting_rights,
2454 next_epoch_protocol_version: ProtocolVersion::new(
2455 system_state_obj.protocol_version(),
2456 ),
2457 epoch_commitments,
2458 })
2459 } else {
2460 self.send_to_hasher
2461 .send((sequence_number, effects.clone()))
2462 .await?;
2463
2464 None
2465 };
2466 let contents = if self.epoch_store.protocol_config().address_aliases() {
2467 CheckpointContents::new_v2(&effects, signatures)
2468 } else {
2469 CheckpointContents::new_with_digests_and_signatures(
2470 effects.iter().map(TransactionEffects::execution_digests),
2471 signatures
2472 .into_iter()
2473 .map(|sigs| sigs.into_iter().map(|(s, _)| s).collect())
2474 .collect(),
2475 )
2476 };
2477
2478 let num_txns = contents.size() as u64;
2479
2480 let network_total_transactions = last_checkpoint
2481 .as_ref()
2482 .map(|(_, c)| c.network_total_transactions + num_txns)
2483 .unwrap_or(num_txns);
2484
2485 let previous_digest = last_checkpoint.as_ref().map(|(_, c)| c.digest());
2486
2487 let matching_randomness_rounds: Vec<_> = effects
2488 .iter()
2489 .filter_map(|e| randomness_rounds.get(e.transaction_digest()))
2490 .copied()
2491 .collect();
2492
2493 let checkpoint_commitments = if self
2494 .epoch_store
2495 .protocol_config()
2496 .include_checkpoint_artifacts_digest_in_summary()
2497 {
2498 let artifacts = CheckpointArtifacts::from(&effects[..]);
2499 let artifacts_digest = artifacts.digest()?;
2500 vec![artifacts_digest.into()]
2501 } else {
2502 Default::default()
2503 };
2504
2505 let summary = CheckpointSummary::new(
2506 self.epoch_store.protocol_config(),
2507 epoch,
2508 sequence_number,
2509 network_total_transactions,
2510 &contents,
2511 previous_digest,
2512 epoch_rolling_gas_cost_summary,
2513 end_of_epoch_data,
2514 timestamp_ms,
2515 matching_randomness_rounds,
2516 checkpoint_commitments,
2517 );
2518 summary.report_checkpoint_age(
2519 &self.metrics.last_created_checkpoint_age,
2520 &self.metrics.last_created_checkpoint_age_ms,
2521 );
2522 if last_checkpoint_of_epoch {
2523 info!(
2524 checkpoint_seq = sequence_number,
2525 "creating last checkpoint of epoch {}", epoch
2526 );
2527 if let Some(stats) = self.store.get_epoch_stats(epoch, &summary) {
2528 self.epoch_store
2529 .report_epoch_metrics_at_last_checkpoint(stats);
2530 }
2531 }
2532 last_checkpoint = Some((sequence_number, summary.clone()));
2533 checkpoints.push((summary, contents));
2534 }
2535
2536 Ok(NonEmpty::from_vec(checkpoints).expect("at least one checkpoint"))
2537 }
2538
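// Rolls gas costs forward across the epoch: the current checkpoint's costs are added
// to the previous checkpoint's rolling summary when both belong to the same epoch,
// otherwise the rolling summary restarts from the current checkpoint. E.g. a previous
// rolling summary of (computation 100, storage 50) plus current costs of (20, 5)
// yields (120, 55) for this checkpoint.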
2539 fn get_epoch_total_gas_cost(
2540 &self,
2541 last_checkpoint: Option<&CheckpointSummary>,
2542 cur_checkpoint_effects: &[TransactionEffects],
2543 ) -> GasCostSummary {
2544 let (previous_epoch, previous_gas_costs) = last_checkpoint
2545 .map(|c| (c.epoch, c.epoch_rolling_gas_cost_summary.clone()))
2546 .unwrap_or_default();
2547 let current_gas_costs = GasCostSummary::new_from_txn_effects(cur_checkpoint_effects.iter());
2548 if previous_epoch == self.epoch_store.epoch() {
2549 GasCostSummary::new(
2551 previous_gas_costs.computation_cost + current_gas_costs.computation_cost,
2552 previous_gas_costs.storage_cost + current_gas_costs.storage_cost,
2553 previous_gas_costs.storage_rebate + current_gas_costs.storage_rebate,
2554 previous_gas_costs.non_refundable_storage_fee
2555 + current_gas_costs.non_refundable_storage_fee,
2556 )
2557 } else {
2558 current_gas_costs
2559 }
2560 }
2561
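// Appends the advance-epoch transaction to the final checkpoint of the epoch: the
// transaction is created and executed here, its effects are pushed onto the
// checkpoint, and an empty signature list is recorded for it (presumably because it
// is a system transaction with no user signatures).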
2562 #[instrument(level = "error", skip_all)]
2563 async fn augment_epoch_last_checkpoint(
2564 &self,
2565 epoch_total_gas_cost: &GasCostSummary,
2566 epoch_start_timestamp_ms: CheckpointTimestamp,
2567 checkpoint_effects: &mut Vec<TransactionEffects>,
2568 signatures: &mut Vec<Vec<(GenericSignature, Option<SequenceNumber>)>>,
2569 checkpoint: CheckpointSequenceNumber,
2570 end_of_epoch_observation_keys: Vec<ExecutionTimeObservationKey>,
2571 last_checkpoint: CheckpointSequenceNumber,
2574 ) -> CheckpointBuilderResult<SuiSystemState> {
2575 let (system_state, effects) = self
2576 .state
2577 .create_and_execute_advance_epoch_tx(
2578 &self.epoch_store,
2579 epoch_total_gas_cost,
2580 checkpoint,
2581 epoch_start_timestamp_ms,
2582 end_of_epoch_observation_keys,
2583 last_checkpoint,
2584 )
2585 .await?;
2586 checkpoint_effects.push(effects);
2587 signatures.push(vec![]);
2588 Ok(system_state)
2589 }
2590
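// Expands the root effects into their transitive dependency closure: any dependency
// executed in the current epoch that is not already included in this or an earlier
// checkpoint is pulled in, so a checkpoint never omits transactions that its own
// contents depend on.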
2591 #[instrument(level = "debug", skip_all)]
2598 fn complete_checkpoint_effects(
2599 &self,
2600 mut roots: Vec<TransactionEffects>,
2601 existing_tx_digests_in_checkpoint: &mut BTreeSet<TransactionDigest>,
2602 ) -> SuiResult<Vec<TransactionEffects>> {
2603 let _scope = monitored_scope("CheckpointBuilder::complete_checkpoint_effects");
2604 let mut results = vec![];
2605 let mut seen = HashSet::new();
2606 loop {
2607 let mut pending = HashSet::new();
2608
2609 let transactions_included = self
2610 .epoch_store
2611 .builder_included_transactions_in_checkpoint(
2612 roots.iter().map(|e| e.transaction_digest()),
2613 )?;
2614
2615 for (effect, tx_included) in roots.into_iter().zip(transactions_included.into_iter()) {
2616 let digest = effect.transaction_digest();
2617 seen.insert(*digest);
2619
2620 if existing_tx_digests_in_checkpoint.contains(effect.transaction_digest()) {
2622 continue;
2623 }
2624
2625 if tx_included || effect.executed_epoch() < self.epoch_store.epoch() {
2627 continue;
2628 }
2629
2630 let existing_effects = self
2631 .epoch_store
2632 .transactions_executed_in_cur_epoch(effect.dependencies())?;
2633
2634 for (dependency, effects_signature_exists) in
2635 effect.dependencies().iter().zip(existing_effects.iter())
2636 {
2637 if !effects_signature_exists {
2642 continue;
2643 }
2644 if seen.insert(*dependency) {
2645 pending.insert(*dependency);
2646 }
2647 }
2648 results.push(effect);
2649 }
2650 if pending.is_empty() {
2651 break;
2652 }
2653 let pending = pending.into_iter().collect::<Vec<_>>();
2654 let effects = self.effects_store.multi_get_executed_effects(&pending);
2655 let effects = effects
2656 .into_iter()
2657 .zip(pending)
2658 .map(|(opt, digest)| match opt {
2659 Some(x) => x,
2660 None => panic!(
2661 "Can not find effect for transaction {:?}, however transaction that depend on it was already executed",
2662 digest
2663 ),
2664 })
2665 .collect::<Vec<_>>();
2666 roots = effects;
2667 }
2668
2669 existing_tx_digests_in_checkpoint.extend(results.iter().map(|e| e.transaction_digest()));
2670 Ok(results)
2671 }
2672
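// Simulation-only sanity check: among the roots there is at most one consensus commit
// prologue, and when one exists it must be the first transaction of the sorted list,
// with no other transaction in the checkpoint being a prologue.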
2673 #[cfg(msim)]
2676 fn expensive_consensus_commit_prologue_invariants_check(
2677 &self,
2678 root_digests: &[TransactionDigest],
2679 sorted: &[TransactionEffects],
2680 ) {
2681 let root_txs = self
2683 .state
2684 .get_transaction_cache_reader()
2685 .multi_get_transaction_blocks(root_digests);
2686 let ccps = root_txs
2687 .iter()
2688 .filter_map(|tx| {
2689 if let Some(tx) = tx {
2690 if tx.transaction_data().is_consensus_commit_prologue() {
2691 Some(tx)
2692 } else {
2693 None
2694 }
2695 } else {
2696 None
2697 }
2698 })
2699 .collect::<Vec<_>>();
2700
2701 assert!(ccps.len() <= 1);
2703
2704 let txs = self
2706 .state
2707 .get_transaction_cache_reader()
2708 .multi_get_transaction_blocks(
2709 &sorted
2710 .iter()
 2711 .map(|tx| *tx.transaction_digest())
2712 .collect::<Vec<_>>(),
2713 );
2714
 2715 if ccps.is_empty() {
2716 for tx in txs.iter() {
2719 if let Some(tx) = tx {
2720 assert!(!tx.transaction_data().is_consensus_commit_prologue());
2721 }
2722 }
2723 } else {
2724 assert!(
2726 txs[0]
2727 .as_ref()
2728 .unwrap()
2729 .transaction_data()
2730 .is_consensus_commit_prologue()
2731 );
2732
2733 assert_eq!(ccps[0].digest(), txs[0].as_ref().unwrap().digest());
2734
2735 for tx in txs.iter().skip(1) {
2736 if let Some(tx) = tx {
2737 assert!(!tx.transaction_data().is_consensus_commit_prologue());
2738 }
2739 }
2740 }
2741 }
2742
2743 #[cfg(msim)]
2744 fn expensive_consensus_commit_prologue_invariants_check_v2(
2745 &self,
2746 root_digests: &[TransactionDigest],
2747 sorted: &[TransactionEffects],
2748 ) {
2749 let root_txs = self
2751 .state
2752 .get_transaction_cache_reader()
2753 .multi_get_transaction_blocks(root_digests);
2754 let ccp_digests_from_roots: HashSet<_> = root_txs
2755 .iter()
2756 .filter_map(|tx| {
2757 if let Some(tx) = tx {
2758 if tx.transaction_data().is_consensus_commit_prologue() {
2759 Some(*tx.digest())
2760 } else {
2761 None
2762 }
2763 } else {
2764 None
2765 }
2766 })
2767 .collect();
2768
2769 let txs = self
2771 .state
2772 .get_transaction_cache_reader()
2773 .multi_get_transaction_blocks(
2774 &sorted
2775 .iter()
 2776 .map(|tx| *tx.transaction_digest())
2777 .collect::<Vec<_>>(),
2778 );
2779
2780 let ccps_in_checkpoint: Vec<_> = txs
2783 .iter()
2784 .filter_map(|tx| {
2785 if let Some(tx) = tx {
2786 if tx.transaction_data().is_consensus_commit_prologue() {
2787 Some(*tx.digest())
2788 } else {
2789 None
2790 }
2791 } else {
2792 None
2793 }
2794 })
2795 .collect();
2796
2797 for ccp_digest in &ccps_in_checkpoint {
2799 assert!(
2800 ccp_digests_from_roots.contains(ccp_digest),
2801 "CCP in checkpoint not found in roots"
2802 );
2803 }
2804
2805 if !ccps_in_checkpoint.is_empty() {
2808 assert!(
2809 txs[0]
2810 .as_ref()
2811 .unwrap()
2812 .transaction_data()
2813 .is_consensus_commit_prologue(),
2814 "First transaction must be a CCP when CCPs are present"
2815 );
2816 }
2817 }
2818}
2819
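// Waits for the given transactions to be executed, logging a debug_fatal and retrying
// if they do not show up within 5 seconds (15 seconds when running under antithesis).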
2820async fn wait_for_effects_with_retry(
2821 effects_store: &dyn TransactionCacheRead,
2822 task_name: &'static str,
2823 digests: &[TransactionDigest],
2824 tx_key: TransactionKey,
2825) -> Vec<TransactionEffects> {
2826 let delay = if in_antithesis() {
2827 15
2829 } else {
2830 5
2831 };
2832 loop {
2833 match tokio::time::timeout(Duration::from_secs(delay), async {
2834 effects_store
2835 .notify_read_executed_effects(task_name, digests)
2836 .await
2837 })
2838 .await
2839 {
2840 Ok(effects) => break effects,
2841 Err(_) => {
2842 debug_fatal!(
2843 "Timeout waiting for transactions to be executed {:?}, retrying...",
2844 tx_key
2845 );
2846 }
2847 }
2848 }
2849}
2850
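// Aggregates checkpoint signatures from validators into certified checkpoints. The
// run loop retries after one second on error and otherwise waits (at most one second)
// for a notification before scanning for newly arrived pending signatures.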
2851impl CheckpointAggregator {
2852 fn new(
2853 tables: Arc<CheckpointStore>,
2854 epoch_store: Arc<AuthorityPerEpochStore>,
2855 notify: Arc<Notify>,
2856 output: Box<dyn CertifiedCheckpointOutput>,
2857 state: Arc<AuthorityState>,
2858 metrics: Arc<CheckpointMetrics>,
2859 ) -> Self {
2860 let current = None;
2861 Self {
2862 store: tables,
2863 epoch_store,
2864 notify,
2865 current,
2866 output,
2867 state,
2868 metrics,
2869 }
2870 }
2871
2872 async fn run(mut self) {
2873 info!("Starting CheckpointAggregator");
2874 loop {
2875 if let Err(e) = self.run_and_notify().await {
2876 error!(
2877 "Error while aggregating checkpoint, will retry in 1s: {:?}",
2878 e
2879 );
2880 self.metrics.checkpoint_errors.inc();
2881 tokio::time::sleep(Duration::from_secs(1)).await;
2882 continue;
2883 }
2884
2885 let _ = timeout(Duration::from_secs(1), self.notify.notified()).await;
2886 }
2887 }
2888
2889 async fn run_and_notify(&mut self) -> SuiResult {
2890 let summaries = self.run_inner()?;
2891 for summary in summaries {
2892 self.output.certified_checkpoint_created(&summary).await?;
2893 }
2894 Ok(())
2895 }
2896
2897 fn run_inner(&mut self) -> SuiResult<Vec<CertifiedCheckpointSummary>> {
2898 let _scope = monitored_scope("CheckpointAggregator");
2899 let mut result = vec![];
2900 'outer: loop {
2901 let next_to_certify = self.next_checkpoint_to_certify()?;
2902 let current = if let Some(current) = &mut self.current {
2903 if current.summary.sequence_number < next_to_certify {
2909 assert_reachable!("skip checkpoint certification");
2910 self.current = None;
2911 continue;
2912 }
2913 current
2914 } else {
2915 let Some(summary) = self
2916 .epoch_store
2917 .get_built_checkpoint_summary(next_to_certify)?
2918 else {
2919 return Ok(result);
2920 };
2921 self.current = Some(CheckpointSignatureAggregator {
2922 next_index: 0,
2923 digest: summary.digest(),
2924 summary,
2925 signatures_by_digest: MultiStakeAggregator::new(
2926 self.epoch_store.committee().clone(),
2927 ),
2928 store: self.store.clone(),
2929 state: self.state.clone(),
2930 metrics: self.metrics.clone(),
2931 });
2932 self.current.as_mut().unwrap()
2933 };
2934
2935 let epoch_tables = self
2936 .epoch_store
2937 .tables()
2938 .expect("should not run past end of epoch");
2939 let iter = epoch_tables
2940 .pending_checkpoint_signatures
2941 .safe_iter_with_bounds(
2942 Some((current.summary.sequence_number, current.next_index)),
2943 None,
2944 );
2945 for item in iter {
2946 let ((seq, index), data) = item?;
2947 if seq != current.summary.sequence_number {
2948 trace!(
2949 checkpoint_seq =? current.summary.sequence_number,
2950 "Not enough checkpoint signatures",
2951 );
2952 return Ok(result);
2954 }
2955 trace!(
2956 checkpoint_seq = current.summary.sequence_number,
2957 "Processing signature for checkpoint (digest: {:?}) from {:?}",
2958 current.summary.digest(),
2959 data.summary.auth_sig().authority.concise()
2960 );
2961 self.metrics
2962 .checkpoint_participation
2963 .with_label_values(&[&format!(
2964 "{:?}",
2965 data.summary.auth_sig().authority.concise()
2966 )])
2967 .inc();
2968 if let Ok(auth_signature) = current.try_aggregate(data) {
2969 debug!(
2970 checkpoint_seq = current.summary.sequence_number,
2971 "Successfully aggregated signatures for checkpoint (digest: {:?})",
2972 current.summary.digest(),
2973 );
2974 let summary = VerifiedCheckpoint::new_unchecked(
2975 CertifiedCheckpointSummary::new_from_data_and_sig(
2976 current.summary.clone(),
2977 auth_signature,
2978 ),
2979 );
2980
2981 self.store.insert_certified_checkpoint(&summary)?;
2982 self.metrics
2983 .last_certified_checkpoint
2984 .set(current.summary.sequence_number as i64);
2985 current.summary.report_checkpoint_age(
2986 &self.metrics.last_certified_checkpoint_age,
2987 &self.metrics.last_certified_checkpoint_age_ms,
2988 );
2989 result.push(summary.into_inner());
2990 self.current = None;
2991 continue 'outer;
2992 } else {
2993 current.next_index = index + 1;
2994 }
2995 }
2996 break;
2997 }
2998 Ok(result)
2999 }
3000
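// The next sequence number to certify is one past the highest certified checkpoint,
// or 0 if nothing has been certified yet.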
3001 fn next_checkpoint_to_certify(&self) -> SuiResult<CheckpointSequenceNumber> {
3002 Ok(self
3003 .store
3004 .tables
3005 .certified_checkpoints
3006 .reversed_safe_iter_with_bounds(None, None)?
3007 .next()
3008 .transpose()?
3009 .map(|(seq, _)| seq + 1)
3010 .unwrap_or_default())
3011 }
3012}
3013
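// Folds individual validator signatures into the per-digest stake aggregator. Only a
// quorum on our own digest yields a certificate; a quorum on a different digest is
// counted as a remote fork, and failures to reach quorum trigger a split-brain check.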
3014impl CheckpointSignatureAggregator {
3015 #[allow(clippy::result_unit_err)]
3016 pub fn try_aggregate(
3017 &mut self,
3018 data: CheckpointSignatureMessage,
3019 ) -> Result<AuthorityStrongQuorumSignInfo, ()> {
3020 let their_digest = *data.summary.digest();
3021 let (_, signature) = data.summary.into_data_and_sig();
3022 let author = signature.authority;
3023 let envelope =
3024 SignedCheckpointSummary::new_from_data_and_sig(self.summary.clone(), signature);
3025 match self.signatures_by_digest.insert(their_digest, envelope) {
3026 InsertResult::Failed { error }
3028 if matches!(
3029 error.as_inner(),
3030 SuiErrorKind::StakeAggregatorRepeatedSigner {
3031 conflicting_sig: false,
3032 ..
3033 },
3034 ) =>
3035 {
3036 Err(())
3037 }
3038 InsertResult::Failed { error } => {
3039 warn!(
3040 checkpoint_seq = self.summary.sequence_number,
3041 "Failed to aggregate new signature from validator {:?}: {:?}",
3042 author.concise(),
3043 error
3044 );
3045 self.check_for_split_brain();
3046 Err(())
3047 }
3048 InsertResult::QuorumReached(cert) => {
3049 if their_digest != self.digest {
3052 self.metrics.remote_checkpoint_forks.inc();
3053 warn!(
3054 checkpoint_seq = self.summary.sequence_number,
3055 "Validator {:?} has mismatching checkpoint digest {}, we have digest {}",
3056 author.concise(),
3057 their_digest,
3058 self.digest
3059 );
3060 return Err(());
3061 }
3062 Ok(cert)
3063 }
3064 InsertResult::NotEnoughVotes {
3065 bad_votes: _,
3066 bad_authorities: _,
3067 } => {
3068 self.check_for_split_brain();
3069 Err(())
3070 }
3071 }
3072 }
3073
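// When a quorum can no longer be reached on any single digest, this logs the
// competing digests ordered by stake, fires the msim fail point used by fork-recovery
// tests, bumps the split-brain metric, and spawns diagnose_split_brain to dump diffs
// against the disagreeing validators.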
3074 fn check_for_split_brain(&self) {
3078 debug!(
3079 checkpoint_seq = self.summary.sequence_number,
3080 "Checking for split brain condition"
3081 );
3082 if self.signatures_by_digest.quorum_unreachable() {
3083 let all_unique_values = self.signatures_by_digest.get_all_unique_values();
3089 let digests_by_stake_messages = all_unique_values
3090 .iter()
3091 .sorted_by_key(|(_, (_, stake))| -(*stake as i64))
3092 .map(|(digest, (_authorities, total_stake))| {
3093 format!("{:?} (total stake: {})", digest, total_stake)
3094 })
3095 .collect::<Vec<String>>();
3096 fail_point_arg!("kill_split_brain_node", |(
3097 checkpoint_overrides,
3098 forked_authorities,
3099 ): (
3100 std::sync::Arc<std::sync::Mutex<std::collections::BTreeMap<u64, String>>>,
3101 std::sync::Arc<std::sync::Mutex<std::collections::HashSet<AuthorityName>>>,
3102 )| {
3103 #[cfg(msim)]
3104 {
3105 if let (Ok(mut overrides), Ok(forked_authorities_set)) =
3106 (checkpoint_overrides.lock(), forked_authorities.lock())
3107 {
3108 let correct_digest = all_unique_values
3110 .iter()
3111 .find(|(_, (authorities, _))| {
3112 authorities
3114 .iter()
3115 .any(|auth| !forked_authorities_set.contains(auth))
3116 })
3117 .map(|(digest, _)| digest.to_string())
3118 .unwrap_or_else(|| {
3119 all_unique_values
3121 .iter()
3122 .max_by_key(|(_, (_, stake))| *stake)
3123 .map(|(digest, _)| digest.to_string())
3124 .unwrap_or_else(|| self.digest.to_string())
3125 });
3126
3127 overrides.insert(self.summary.sequence_number, correct_digest.clone());
3128
3129 tracing::error!(
3130 fatal = true,
3131 "Fork recovery test: detected split-brain for sequence number: {}, using digest: {}",
3132 self.summary.sequence_number,
3133 correct_digest
3134 );
3135 }
3136 }
3137 });
3138
3139 debug_fatal!(
3140 "Split brain detected in checkpoint signature aggregation for checkpoint {:?}. Remaining stake: {:?}, Digests by stake: {:?}",
3141 self.summary.sequence_number,
3142 self.signatures_by_digest.uncommitted_stake(),
3143 digests_by_stake_messages
3144 );
3145 self.metrics.split_brain_checkpoint_forks.inc();
3146
3147 let all_unique_values = self.signatures_by_digest.get_all_unique_values();
3148 let local_summary = self.summary.clone();
3149 let state = self.state.clone();
3150 let tables = self.store.clone();
3151
3152 tokio::spawn(async move {
3153 diagnose_split_brain(all_unique_values, local_summary, state, tables).await;
3154 });
3155 }
3156 }
3157}
3158
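// Fetches the pending checkpoint from one randomly chosen validator per divergent
// digest and writes diffs of the summaries, contents, transactions, and effects
// against our local checkpoint to a temp file for offline fork analysis.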
3159async fn diagnose_split_brain(
3165 all_unique_values: BTreeMap<CheckpointDigest, (Vec<AuthorityName>, StakeUnit)>,
3166 local_summary: CheckpointSummary,
3167 state: Arc<AuthorityState>,
3168 tables: Arc<CheckpointStore>,
3169) {
3170 debug!(
3171 checkpoint_seq = local_summary.sequence_number,
3172 "Running split brain diagnostics..."
3173 );
3174 let time = SystemTime::now();
3175 let digest_to_validator = all_unique_values
3177 .iter()
3178 .filter_map(|(digest, (validators, _))| {
3179 if *digest != local_summary.digest() {
3180 let random_validator = validators.choose(&mut get_rng()).unwrap();
3181 Some((*digest, *random_validator))
3182 } else {
3183 None
3184 }
3185 })
3186 .collect::<HashMap<_, _>>();
3187 if digest_to_validator.is_empty() {
3188 panic!(
3189 "Given split brain condition, there should be at \
3190 least one validator that disagrees with local signature"
3191 );
3192 }
3193
3194 let epoch_store = state.load_epoch_store_one_call_per_task();
3195 let committee = epoch_store
3196 .epoch_start_state()
3197 .get_sui_committee_with_network_metadata();
3198 let network_config = default_mysten_network_config();
3199 let network_clients =
3200 make_network_authority_clients_with_network_config(&committee, &network_config);
3201
3202 let response_futures = digest_to_validator
3204 .values()
3205 .cloned()
3206 .map(|validator| {
3207 let client = network_clients
3208 .get(&validator)
3209 .expect("Failed to get network client");
3210 let request = CheckpointRequestV2 {
3211 sequence_number: Some(local_summary.sequence_number),
3212 request_content: true,
3213 certified: false,
3214 };
3215 client.handle_checkpoint_v2(request)
3216 })
3217 .collect::<Vec<_>>();
3218
3219 let digest_name_pair = digest_to_validator.iter();
3220 let response_data = futures::future::join_all(response_futures)
3221 .await
3222 .into_iter()
3223 .zip(digest_name_pair)
3224 .filter_map(|(response, (digest, name))| match response {
3225 Ok(response) => match response {
3226 CheckpointResponseV2 {
3227 checkpoint: Some(CheckpointSummaryResponse::Pending(summary)),
3228 contents: Some(contents),
3229 } => Some((*name, *digest, summary, contents)),
3230 CheckpointResponseV2 {
3231 checkpoint: Some(CheckpointSummaryResponse::Certified(_)),
3232 contents: _,
3233 } => {
3234 panic!("Expected pending checkpoint, but got certified checkpoint");
3235 }
3236 CheckpointResponseV2 {
3237 checkpoint: None,
3238 contents: _,
3239 } => {
3240 error!(
3241 "Summary for checkpoint {:?} not found on validator {:?}",
3242 local_summary.sequence_number, name
3243 );
3244 None
3245 }
3246 CheckpointResponseV2 {
3247 checkpoint: _,
3248 contents: None,
3249 } => {
3250 error!(
3251 "Contents for checkpoint {:?} not found on validator {:?}",
3252 local_summary.sequence_number, name
3253 );
3254 None
3255 }
3256 },
3257 Err(e) => {
3258 error!(
3259 "Failed to get checkpoint contents from validator for fork diagnostics: {:?}",
3260 e
3261 );
3262 None
3263 }
3264 })
3265 .collect::<Vec<_>>();
3266
3267 let local_checkpoint_contents = tables
3268 .get_checkpoint_contents(&local_summary.content_digest)
3269 .unwrap_or_else(|_| {
3270 panic!(
3271 "Could not find checkpoint contents for digest {:?}",
3272 local_summary.digest()
3273 )
3274 })
3275 .unwrap_or_else(|| {
3276 panic!(
3277 "Could not find local full checkpoint contents for checkpoint {:?}, digest {:?}",
3278 local_summary.sequence_number,
3279 local_summary.digest()
3280 )
3281 });
3282 let local_contents_text = format!("{local_checkpoint_contents:?}");
3283
3284 let local_summary_text = format!("{local_summary:?}");
3285 let local_validator = state.name.concise();
3286 let diff_patches = response_data
3287 .iter()
3288 .map(|(name, other_digest, other_summary, contents)| {
3289 let other_contents_text = format!("{contents:?}");
3290 let other_summary_text = format!("{other_summary:?}");
3291 let (local_transactions, local_effects): (Vec<_>, Vec<_>) = local_checkpoint_contents
3292 .enumerate_transactions(&local_summary)
3293 .map(|(_, exec_digest)| (exec_digest.transaction, exec_digest.effects))
3294 .unzip();
3295 let (other_transactions, other_effects): (Vec<_>, Vec<_>) = contents
3296 .enumerate_transactions(other_summary)
3297 .map(|(_, exec_digest)| (exec_digest.transaction, exec_digest.effects))
3298 .unzip();
3299 let summary_patch = create_patch(&local_summary_text, &other_summary_text);
3300 let contents_patch = create_patch(&local_contents_text, &other_contents_text);
3301 let local_transactions_text = format!("{local_transactions:#?}");
3302 let other_transactions_text = format!("{other_transactions:#?}");
3303 let transactions_patch =
3304 create_patch(&local_transactions_text, &other_transactions_text);
3305 let local_effects_text = format!("{local_effects:#?}");
3306 let other_effects_text = format!("{other_effects:#?}");
3307 let effects_patch = create_patch(&local_effects_text, &other_effects_text);
3308 let seq_number = local_summary.sequence_number;
3309 let local_digest = local_summary.digest();
3310 let other_validator = name.concise();
3311 format!(
3312 "Checkpoint: {seq_number:?}\n\
3313 Local validator (original): {local_validator:?}, digest: {local_digest:?}\n\
3314 Other validator (modified): {other_validator:?}, digest: {other_digest:?}\n\n\
3315 Summary Diff: \n{summary_patch}\n\n\
3316 Contents Diff: \n{contents_patch}\n\n\
3317 Transactions Diff: \n{transactions_patch}\n\n\
3318 Effects Diff: \n{effects_patch}",
3319 )
3320 })
3321 .collect::<Vec<_>>()
3322 .join("\n\n\n");
3323
3324 let header = format!(
3325 "Checkpoint Fork Dump - Authority {local_validator:?}: \n\
3326 Datetime: {:?}",
3327 time
3328 );
3329 let fork_logs_text = format!("{header}\n\n{diff_patches}\n\n");
3330 let path = tempfile::tempdir()
3331 .expect("Failed to create tempdir")
3332 .keep()
3333 .join(Path::new("checkpoint_fork_dump.txt"));
3334 let mut file = File::create(path).unwrap();
3335 write!(file, "{}", fork_logs_text).unwrap();
3336 debug!("{}", fork_logs_text);
3337}
3338
3339pub trait CheckpointServiceNotify {
3340 fn notify_checkpoint_signature(
3341 &self,
3342 epoch_store: &AuthorityPerEpochStore,
3343 info: &CheckpointSignatureMessage,
3344 ) -> SuiResult;
3345
3346 fn notify_checkpoint(&self) -> SuiResult;
3347}
3348
3349#[allow(clippy::large_enum_variant)]
3350enum CheckpointServiceState {
3351 Unstarted(
3352 (
3353 CheckpointBuilder,
3354 CheckpointAggregator,
3355 CheckpointStateHasher,
3356 ),
3357 ),
3358 Started,
3359}
3360
3361impl CheckpointServiceState {
3362 fn take_unstarted(
3363 &mut self,
3364 ) -> (
3365 CheckpointBuilder,
3366 CheckpointAggregator,
3367 CheckpointStateHasher,
3368 ) {
3369 let mut state = CheckpointServiceState::Started;
3370 std::mem::swap(self, &mut state);
3371
3372 match state {
3373 CheckpointServiceState::Unstarted((builder, aggregator, hasher)) => {
3374 (builder, aggregator, hasher)
3375 }
3376 CheckpointServiceState::Started => panic!("CheckpointServiceState is already started"),
3377 }
3378 }
3379}
3380
3381pub struct CheckpointService {
3382 tables: Arc<CheckpointStore>,
3383 notify_builder: Arc<Notify>,
3384 notify_aggregator: Arc<Notify>,
3385 last_signature_index: Mutex<u64>,
3386 highest_currently_built_seq_tx: watch::Sender<CheckpointSequenceNumber>,
3388 highest_previously_built_seq: CheckpointSequenceNumber,
3391 metrics: Arc<CheckpointMetrics>,
3392 state: Mutex<CheckpointServiceState>,
3393}
3394
3395impl CheckpointService {
3396 pub fn build(
3398 state: Arc<AuthorityState>,
3399 checkpoint_store: Arc<CheckpointStore>,
3400 epoch_store: Arc<AuthorityPerEpochStore>,
3401 effects_store: Arc<dyn TransactionCacheRead>,
3402 global_state_hasher: Weak<GlobalStateHasher>,
3403 checkpoint_output: Box<dyn CheckpointOutput>,
3404 certified_checkpoint_output: Box<dyn CertifiedCheckpointOutput>,
3405 metrics: Arc<CheckpointMetrics>,
3406 max_transactions_per_checkpoint: usize,
3407 max_checkpoint_size_bytes: usize,
3408 ) -> Arc<Self> {
3409 info!(
3410 "Starting checkpoint service with {max_transactions_per_checkpoint} max_transactions_per_checkpoint and {max_checkpoint_size_bytes} max_checkpoint_size_bytes"
3411 );
3412 let notify_builder = Arc::new(Notify::new());
3413 let notify_aggregator = Arc::new(Notify::new());
3414
3415 let highest_previously_built_seq = checkpoint_store
3417 .get_latest_locally_computed_checkpoint()
3418 .expect("failed to get latest locally computed checkpoint")
3419 .map(|s| s.sequence_number)
3420 .unwrap_or(0);
3421
3422 let highest_currently_built_seq =
3423 CheckpointBuilder::load_last_built_checkpoint_summary(&epoch_store, &checkpoint_store)
3424 .expect("epoch should not have ended")
3425 .map(|(seq, _)| seq)
3426 .unwrap_or(0);
3427
3428 let (highest_currently_built_seq_tx, _) = watch::channel(highest_currently_built_seq);
3429
3430 let aggregator = CheckpointAggregator::new(
3431 checkpoint_store.clone(),
3432 epoch_store.clone(),
3433 notify_aggregator.clone(),
3434 certified_checkpoint_output,
3435 state.clone(),
3436 metrics.clone(),
3437 );
3438
3439 let (send_to_hasher, receive_from_builder) = mpsc::channel(16);
3440
3441 let ckpt_state_hasher = CheckpointStateHasher::new(
3442 epoch_store.clone(),
3443 global_state_hasher.clone(),
3444 receive_from_builder,
3445 );
3446
3447 let builder = CheckpointBuilder::new(
3448 state.clone(),
3449 checkpoint_store.clone(),
3450 epoch_store.clone(),
3451 notify_builder.clone(),
3452 effects_store,
3453 global_state_hasher,
3454 send_to_hasher,
3455 checkpoint_output,
3456 notify_aggregator.clone(),
3457 highest_currently_built_seq_tx.clone(),
3458 metrics.clone(),
3459 max_transactions_per_checkpoint,
3460 max_checkpoint_size_bytes,
3461 );
3462
3463 let last_signature_index = epoch_store
3464 .get_last_checkpoint_signature_index()
3465 .expect("should not cross end of epoch");
3466 let last_signature_index = Mutex::new(last_signature_index);
3467
3468 Arc::new(Self {
3469 tables: checkpoint_store,
3470 notify_builder,
3471 notify_aggregator,
3472 last_signature_index,
3473 highest_currently_built_seq_tx,
3474 highest_previously_built_seq,
3475 metrics,
3476 state: Mutex::new(CheckpointServiceState::Unstarted((
3477 builder,
3478 aggregator,
3479 ckpt_state_hasher,
3480 ))),
3481 })
3482 }
3483
3484 pub async fn spawn(
3492 &self,
3493 epoch_store: Arc<AuthorityPerEpochStore>,
3494 consensus_replay_waiter: Option<ReplayWaiter>,
3495 ) {
3496 let (builder, aggregator, state_hasher) = self.state.lock().take_unstarted();
3497
3498 if let Some(last_committed_seq) = self
3501 .tables
3502 .get_highest_executed_checkpoint()
3503 .expect("Failed to get highest executed checkpoint")
3504 .map(|checkpoint| *checkpoint.sequence_number())
3505 {
3506 if let Err(e) = builder
3507 .epoch_store
3508 .clear_state_hashes_after_checkpoint(last_committed_seq)
3509 {
3510 error!(
3511 "Failed to clear state hashes after checkpoint {}: {:?}",
3512 last_committed_seq, e
3513 );
3514 } else {
3515 info!(
3516 "Cleared state hashes after checkpoint {} to ensure consistent ECMH computation",
3517 last_committed_seq
3518 );
3519 }
3520 }
3521
3522 let (builder_finished_tx, builder_finished_rx) = tokio::sync::oneshot::channel();
3523
3524 let state_hasher_task = spawn_monitored_task!(state_hasher.run());
3525 let aggregator_task = spawn_monitored_task!(aggregator.run());
3526
3527 spawn_monitored_task!(async move {
3528 epoch_store
3529 .within_alive_epoch(async move {
3530 builder.run(consensus_replay_waiter).await;
3531 builder_finished_tx.send(()).ok();
3532 })
3533 .await
3534 .ok();
3535
3536 state_hasher_task
3538 .await
3539 .expect("state hasher should exit normally");
3540
3541 aggregator_task.abort();
3544 aggregator_task.await.ok();
3545 });
3546
3547 if tokio::time::timeout(Duration::from_secs(120), async move {
3553 tokio::select! {
3554 _ = builder_finished_rx => { debug!("CheckpointBuilder finished"); }
3555 _ = self.wait_for_rebuilt_checkpoints() => (),
3556 }
3557 })
3558 .await
3559 .is_err()
3560 {
3561 debug_fatal!("Timed out waiting for checkpoints to be rebuilt");
3562 }
3563 }
3564}
3565
3566impl CheckpointService {
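// Blocks until the builder has caught back up to the highest checkpoint that was
// locally computed before this service was (re)started, which appears to guard
// against acting on stale state while consensus commits are being replayed.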
3567 pub async fn wait_for_rebuilt_checkpoints(&self) {
3573 let highest_previously_built_seq = self.highest_previously_built_seq;
3574 let mut rx = self.highest_currently_built_seq_tx.subscribe();
3575 let mut highest_currently_built_seq = *rx.borrow_and_update();
3576 info!(
3577 "Waiting for checkpoints to be rebuilt, previously built seq: {highest_previously_built_seq}, currently built seq: {highest_currently_built_seq}"
3578 );
3579 loop {
3580 if highest_currently_built_seq >= highest_previously_built_seq {
3581 info!("Checkpoint rebuild complete");
3582 break;
3583 }
3584 rx.changed().await.unwrap();
3585 highest_currently_built_seq = *rx.borrow_and_update();
3586 }
3587 }
3588
3589 #[cfg(test)]
3590 fn write_and_notify_checkpoint_for_testing(
3591 &self,
3592 epoch_store: &AuthorityPerEpochStore,
3593 checkpoint: PendingCheckpoint,
3594 ) -> SuiResult {
3595 use crate::authority::authority_per_epoch_store::consensus_quarantine::ConsensusCommitOutput;
3596
3597 let mut output = ConsensusCommitOutput::new(0);
3598 epoch_store.write_pending_checkpoint(&mut output, &checkpoint)?;
3599 output.set_default_commit_stats_for_testing();
3600 epoch_store.push_consensus_output_for_tests(output);
3601 self.notify_checkpoint()?;
3602 Ok(())
3603 }
3604}
3605
3606impl CheckpointServiceNotify for CheckpointService {
3607 fn notify_checkpoint_signature(
3608 &self,
3609 epoch_store: &AuthorityPerEpochStore,
3610 info: &CheckpointSignatureMessage,
3611 ) -> SuiResult {
3612 let sequence = info.summary.sequence_number;
3613 let signer = info.summary.auth_sig().authority.concise();
3614
3615 if let Some(highest_verified_checkpoint) = self
3616 .tables
3617 .get_highest_verified_checkpoint()?
3618 .map(|x| *x.sequence_number())
3619 && sequence <= highest_verified_checkpoint
3620 {
3621 trace!(
3622 checkpoint_seq = sequence,
3623 "Ignore checkpoint signature from {} - already certified", signer,
3624 );
3625 self.metrics
3626 .last_ignored_checkpoint_signature_received
3627 .set(sequence as i64);
3628 return Ok(());
3629 }
3630 trace!(
3631 checkpoint_seq = sequence,
3632 "Received checkpoint signature, digest {} from {}",
3633 info.summary.digest(),
3634 signer,
3635 );
3636 self.metrics
3637 .last_received_checkpoint_signatures
3638 .with_label_values(&[&signer.to_string()])
3639 .set(sequence as i64);
3640 let mut index = self.last_signature_index.lock();
3643 *index += 1;
3644 epoch_store.insert_checkpoint_signature(sequence, *index, info)?;
3645 self.notify_aggregator.notify_one();
3646 Ok(())
3647 }
3648
3649 fn notify_checkpoint(&self) -> SuiResult {
3650 self.notify_builder.notify_one();
3651 Ok(())
3652 }
3653}
3654
3655pub struct CheckpointServiceNoop {}
3657impl CheckpointServiceNotify for CheckpointServiceNoop {
3658 fn notify_checkpoint_signature(
3659 &self,
3660 _: &AuthorityPerEpochStore,
3661 _: &CheckpointSignatureMessage,
3662 ) -> SuiResult {
3663 Ok(())
3664 }
3665
3666 fn notify_checkpoint(&self) -> SuiResult {
3667 Ok(())
3668 }
3669}
3670
3671impl PendingCheckpoint {
3672 pub fn height(&self) -> CheckpointHeight {
3673 self.details.checkpoint_height
3674 }
3675
3676 pub fn roots(&self) -> &Vec<TransactionKey> {
3677 &self.roots
3678 }
3679
3680 pub fn details(&self) -> &PendingCheckpointInfo {
3681 &self.details
3682 }
3683}
3684
3685impl PendingCheckpointV2 {
3686 pub fn height(&self) -> CheckpointHeight {
3687 self.details.checkpoint_height
3688 }
3689
3690 pub(crate) fn num_roots(&self) -> usize {
3691 self.roots.iter().map(|r| r.tx_roots.len()).sum()
3692 }
3693}
3694
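// A small future adapter that counts how many times the inner future is polled and
// returns the count together with its output; seemingly intended as a diagnostics
// helper (see poll_count below).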
3695pin_project! {
3696 pub struct PollCounter<Fut> {
3697 #[pin]
3698 future: Fut,
3699 count: usize,
3700 }
3701}
3702
3703impl<Fut> PollCounter<Fut> {
3704 pub fn new(future: Fut) -> Self {
3705 Self { future, count: 0 }
3706 }
3707
3708 pub fn count(&self) -> usize {
3709 self.count
3710 }
3711}
3712
3713impl<Fut: Future> Future for PollCounter<Fut> {
3714 type Output = (usize, Fut::Output);
3715
3716 fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
3717 let this = self.project();
3718 *this.count += 1;
3719 match this.future.poll(cx) {
3720 Poll::Ready(output) => Poll::Ready((*this.count, output)),
3721 Poll::Pending => Poll::Pending,
3722 }
3723 }
3724}
3725
3726fn poll_count<Fut>(future: Fut) -> PollCounter<Fut> {
3727 PollCounter::new(future)
3728}
3729
3730#[cfg(test)]
3731mod tests {
3732 use super::*;
3733 use crate::authority::test_authority_builder::TestAuthorityBuilder;
3734 use crate::transaction_outputs::TransactionOutputs;
3735 use fastcrypto_zkp::bn254::zk_login::{JWK, JwkId};
3736 use futures::FutureExt as _;
3737 use futures::future::BoxFuture;
3738 use std::collections::HashMap;
3739 use std::ops::Deref;
3740 use sui_macros::sim_test;
3741 use sui_protocol_config::{Chain, ProtocolConfig};
3742 use sui_types::accumulator_event::AccumulatorEvent;
3743 use sui_types::authenticator_state::ActiveJwk;
3744 use sui_types::base_types::{SequenceNumber, TransactionEffectsDigest};
3745 use sui_types::crypto::Signature;
3746 use sui_types::effects::{TransactionEffects, TransactionEvents};
3747 use sui_types::messages_checkpoint::SignedCheckpointSummary;
3748 use sui_types::transaction::VerifiedTransaction;
3749 use tokio::sync::mpsc;
3750
3751 #[tokio::test]
3752 async fn test_clear_locally_computed_checkpoints_from_deletes_inclusive_range() {
3753 let store = CheckpointStore::new_for_tests();
3754 let protocol = sui_protocol_config::ProtocolConfig::get_for_max_version_UNSAFE();
3755 for seq in 70u64..=80u64 {
3756 let contents =
3757 sui_types::messages_checkpoint::CheckpointContents::new_with_digests_only_for_tests(
3758 [sui_types::base_types::ExecutionDigests::new(
3759 sui_types::digests::TransactionDigest::random(),
3760 sui_types::digests::TransactionEffectsDigest::ZERO,
3761 )],
3762 );
3763 let summary = sui_types::messages_checkpoint::CheckpointSummary::new(
3764 &protocol,
3765 0,
3766 seq,
3767 0,
3768 &contents,
3769 None,
3770 sui_types::gas::GasCostSummary::default(),
3771 None,
3772 0,
3773 Vec::new(),
3774 Vec::new(),
3775 );
3776 store
3777 .tables
3778 .locally_computed_checkpoints
3779 .insert(&seq, &summary)
3780 .unwrap();
3781 }
3782
3783 store
3784 .clear_locally_computed_checkpoints_from(76)
3785 .expect("clear should succeed");
3786
3787 assert!(
3789 store
3790 .tables
3791 .locally_computed_checkpoints
3792 .get(&75)
3793 .unwrap()
3794 .is_some()
3795 );
3796 assert!(
3797 store
3798 .tables
3799 .locally_computed_checkpoints
3800 .get(&76)
3801 .unwrap()
3802 .is_none()
3803 );
3804
3805 for seq in 70u64..76u64 {
3806 assert!(
3807 store
3808 .tables
3809 .locally_computed_checkpoints
3810 .get(&seq)
3811 .unwrap()
3812 .is_some()
3813 );
3814 }
3815 for seq in 76u64..=80u64 {
3816 assert!(
3817 store
3818 .tables
3819 .locally_computed_checkpoints
3820 .get(&seq)
3821 .unwrap()
3822 .is_none()
3823 );
3824 }
3825 }
3826
3827 #[tokio::test]
3828 async fn test_fork_detection_storage() {
3829 let store = CheckpointStore::new_for_tests();
3830 let seq_num = 42;
3832 let digest = CheckpointDigest::random();
3833
3834 assert!(store.get_checkpoint_fork_detected().unwrap().is_none());
3835
3836 store
3837 .record_checkpoint_fork_detected(seq_num, digest)
3838 .unwrap();
3839
3840 let retrieved = store.get_checkpoint_fork_detected().unwrap();
3841 assert!(retrieved.is_some());
3842 let (retrieved_seq, retrieved_digest) = retrieved.unwrap();
3843 assert_eq!(retrieved_seq, seq_num);
3844 assert_eq!(retrieved_digest, digest);
3845
3846 store.clear_checkpoint_fork_detected().unwrap();
3847 assert!(store.get_checkpoint_fork_detected().unwrap().is_none());
3848
3849 let tx_digest = TransactionDigest::random();
3851 let expected_effects = TransactionEffectsDigest::random();
3852 let actual_effects = TransactionEffectsDigest::random();
3853
3854 assert!(store.get_transaction_fork_detected().unwrap().is_none());
3855
3856 store
3857 .record_transaction_fork_detected(tx_digest, expected_effects, actual_effects)
3858 .unwrap();
3859
3860 let retrieved = store.get_transaction_fork_detected().unwrap();
3861 assert!(retrieved.is_some());
3862 let (retrieved_tx, retrieved_expected, retrieved_actual) = retrieved.unwrap();
3863 assert_eq!(retrieved_tx, tx_digest);
3864 assert_eq!(retrieved_expected, expected_effects);
3865 assert_eq!(retrieved_actual, actual_effects);
3866
3867 store.clear_transaction_fork_detected().unwrap();
3868 assert!(store.get_transaction_fork_detected().unwrap().is_none());
3869 }
3870
3871 #[sim_test]
3872 pub async fn checkpoint_builder_test() {
3873 telemetry_subscribers::init_for_testing();
3874
3875 let mut protocol_config =
3876 ProtocolConfig::get_for_version(ProtocolVersion::max(), Chain::Unknown);
3877 protocol_config.disable_accumulators_for_testing();
3878 protocol_config.set_split_checkpoints_in_consensus_handler_for_testing(false);
3879 protocol_config.set_min_checkpoint_interval_ms_for_testing(100);
3880 let state = TestAuthorityBuilder::new()
3881 .with_protocol_config(protocol_config)
3882 .build()
3883 .await;
3884
3885 let dummy_tx = VerifiedTransaction::new_authenticator_state_update(
3886 0,
3887 0,
3888 vec![],
3889 SequenceNumber::new(),
3890 );
3891
3892 let jwks = {
3893 let mut jwks = Vec::new();
3894 while bcs::to_bytes(&jwks).unwrap().len() < 40_000 {
3895 jwks.push(ActiveJwk {
3896 jwk_id: JwkId::new(
3897 "https://accounts.google.com".to_string(),
3898 "1234567890".to_string(),
3899 ),
3900 jwk: JWK {
3901 kty: "RSA".to_string(),
3902 e: "AQAB".to_string(),
3903 n: "1234567890".to_string(),
3904 alg: "RS256".to_string(),
3905 },
3906 epoch: 0,
3907 });
3908 }
3909 jwks
3910 };
3911
3912 let dummy_tx_with_data =
3913 VerifiedTransaction::new_authenticator_state_update(0, 1, jwks, SequenceNumber::new());
3914
3915 for i in 0..15 {
3916 state
3917 .database_for_testing()
3918 .perpetual_tables
3919 .transactions
3920 .insert(&d(i), dummy_tx.serializable_ref())
3921 .unwrap();
3922 }
3923 for i in 15..20 {
3924 state
3925 .database_for_testing()
3926 .perpetual_tables
3927 .transactions
3928 .insert(&d(i), dummy_tx_with_data.serializable_ref())
3929 .unwrap();
3930 }
3931
3932 let mut store = HashMap::<TransactionDigest, TransactionEffects>::new();
3933 commit_cert_for_test(
3934 &mut store,
3935 state.clone(),
3936 d(1),
3937 vec![d(2), d(3)],
3938 GasCostSummary::new(11, 12, 11, 1),
3939 );
3940 commit_cert_for_test(
3941 &mut store,
3942 state.clone(),
3943 d(2),
3944 vec![d(3), d(4)],
3945 GasCostSummary::new(21, 22, 21, 1),
3946 );
3947 commit_cert_for_test(
3948 &mut store,
3949 state.clone(),
3950 d(3),
3951 vec![],
3952 GasCostSummary::new(31, 32, 31, 1),
3953 );
3954 commit_cert_for_test(
3955 &mut store,
3956 state.clone(),
3957 d(4),
3958 vec![],
3959 GasCostSummary::new(41, 42, 41, 1),
3960 );
3961 for i in [5, 6, 7, 10, 11, 12, 13] {
3962 commit_cert_for_test(
3963 &mut store,
3964 state.clone(),
3965 d(i),
3966 vec![],
3967 GasCostSummary::new(41, 42, 41, 1),
3968 );
3969 }
3970 for i in [15, 16, 17] {
3971 commit_cert_for_test(
3972 &mut store,
3973 state.clone(),
3974 d(i),
3975 vec![],
3976 GasCostSummary::new(51, 52, 51, 1),
3977 );
3978 }
3979 let all_digests: Vec<_> = store.keys().copied().collect();
3980 for digest in all_digests {
3981 let signature = Signature::Ed25519SuiSignature(Default::default()).into();
3982 state
3983 .epoch_store_for_testing()
3984 .test_insert_user_signature(digest, vec![(signature, None)]);
3985 }
3986
3987 let (output, mut result) = mpsc::channel::<(CheckpointContents, CheckpointSummary)>(10);
3988 let (certified_output, mut certified_result) =
3989 mpsc::channel::<CertifiedCheckpointSummary>(10);
3990 let store = Arc::new(store);
3991
3992 let ckpt_dir = tempfile::tempdir().unwrap();
3993 let checkpoint_store =
3994 CheckpointStore::new(ckpt_dir.path(), Arc::new(PrunerWatermarks::default()));
3995 let epoch_store = state.epoch_store_for_testing();
3996
3997 let global_state_hasher = Arc::new(GlobalStateHasher::new_for_tests(
3998 state.get_global_state_hash_store().clone(),
3999 ));
4000
        let checkpoint_service = CheckpointService::build(
            state.clone(),
            checkpoint_store,
            epoch_store.clone(),
            store,
            Arc::downgrade(&global_state_hasher),
            Box::new(output),
            Box::new(certified_output),
            CheckpointMetrics::new_for_tests(),
            3,
            100_000,
        );
        checkpoint_service.spawn(epoch_store.clone(), None).await;

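        // Feed pending checkpoints into the builder. Height 2 exceeds the transaction
        // count limit, height 3 exceeds the size limit, and height 4 arrives within
        // the 100ms minimum checkpoint interval of height 3.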
        checkpoint_service
            .write_and_notify_checkpoint_for_testing(&epoch_store, p(0, vec![4], 0))
            .unwrap();
        checkpoint_service
            .write_and_notify_checkpoint_for_testing(&epoch_store, p(1, vec![1, 3], 2000))
            .unwrap();
        checkpoint_service
            .write_and_notify_checkpoint_for_testing(&epoch_store, p(2, vec![10, 11, 12, 13], 3000))
            .unwrap();
        checkpoint_service
            .write_and_notify_checkpoint_for_testing(&epoch_store, p(3, vec![15, 16, 17], 4000))
            .unwrap();
        checkpoint_service
            .write_and_notify_checkpoint_for_testing(&epoch_store, p(4, vec![5], 4001))
            .unwrap();
        checkpoint_service
            .write_and_notify_checkpoint_for_testing(&epoch_store, p(5, vec![6], 5000))
            .unwrap();

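        // Checkpoint 0 contains only d(4). Checkpoint 1 pulls in d(1) together with its
        // dependencies d(2) and d(3) in causal order; its rolling gas summary covers all
        // four transactions included in checkpoints so far.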
        let (c1c, c1s) = result.recv().await.unwrap();
        let (c2c, c2s) = result.recv().await.unwrap();

        let c1t = c1c.iter().map(|d| d.transaction).collect::<Vec<_>>();
        let c2t = c2c.iter().map(|d| d.transaction).collect::<Vec<_>>();
        assert_eq!(c1t, vec![d(4)]);
        assert_eq!(c1s.previous_digest, None);
        assert_eq!(c1s.sequence_number, 0);
        assert_eq!(
            c1s.epoch_rolling_gas_cost_summary,
            GasCostSummary::new(41, 42, 41, 1)
        );

        assert_eq!(c2t, vec![d(3), d(2), d(1)]);
        assert_eq!(c2s.previous_digest, Some(c1s.digest()));
        assert_eq!(c2s.sequence_number, 1);
        assert_eq!(
            c2s.epoch_rolling_gas_cost_summary,
            GasCostSummary::new(104, 108, 104, 4)
        );

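        // Pending height 2 had four transactions; the 3-transaction limit splits it
        // into checkpoints 2 and 3.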
        let (c3c, c3s) = result.recv().await.unwrap();
        let c3t = c3c.iter().map(|d| d.transaction).collect::<Vec<_>>();
        let (c4c, c4s) = result.recv().await.unwrap();
        let c4t = c4c.iter().map(|d| d.transaction).collect::<Vec<_>>();
        assert_eq!(c3s.sequence_number, 2);
        assert_eq!(c3s.previous_digest, Some(c2s.digest()));
        assert_eq!(c4s.sequence_number, 3);
        assert_eq!(c4s.previous_digest, Some(c3s.digest()));
        assert_eq!(c3t, vec![d(10), d(11), d(12)]);
        assert_eq!(c4t, vec![d(13)]);

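        // Pending height 3 had three large (~40KB) transactions; the 100_000-byte
        // content budget splits it into checkpoints 4 and 5.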
        let (c5c, c5s) = result.recv().await.unwrap();
        let c5t = c5c.iter().map(|d| d.transaction).collect::<Vec<_>>();
        let (c6c, c6s) = result.recv().await.unwrap();
        let c6t = c6c.iter().map(|d| d.transaction).collect::<Vec<_>>();
        assert_eq!(c5s.sequence_number, 4);
        assert_eq!(c5s.previous_digest, Some(c4s.digest()));
        assert_eq!(c6s.sequence_number, 5);
        assert_eq!(c6s.previous_digest, Some(c5s.digest()));
        assert_eq!(c5t, vec![d(15), d(16)]);
        assert_eq!(c6t, vec![d(17)]);

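        // Pending heights 4 and 5 are expected to land in a single checkpoint: height 4's
        // timestamp (4001) is within the 100ms minimum interval of the previous
        // checkpoint, so it is held back and combined with height 5.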
        let (c7c, c7s) = result.recv().await.unwrap();
        let c7t = c7c.iter().map(|d| d.transaction).collect::<Vec<_>>();
        assert_eq!(c7t, vec![d(5), d(6)]);
        assert_eq!(c7s.previous_digest, Some(c6s.digest()));
        assert_eq!(c7s.sequence_number, 6);

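        // Sign the first two summaries with this validator's key and deliver the
        // signature messages out of order; certified checkpoints should still come
        // out in sequence-number order.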
        let c1ss = SignedCheckpointSummary::new(c1s.epoch, c1s, state.secret.deref(), state.name);
        let c2ss = SignedCheckpointSummary::new(c2s.epoch, c2s, state.secret.deref(), state.name);

        checkpoint_service
            .notify_checkpoint_signature(
                &epoch_store,
                &CheckpointSignatureMessage { summary: c2ss },
            )
            .unwrap();
        checkpoint_service
            .notify_checkpoint_signature(
                &epoch_store,
                &CheckpointSignatureMessage { summary: c1ss },
            )
            .unwrap();

        let c1sc = certified_result.recv().await.unwrap();
        let c2sc = certified_result.recv().await.unwrap();
        assert_eq!(c1sc.sequence_number, 0);
        assert_eq!(c2sc.sequence_number, 1);
    }

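    // Test-only transaction cache: a HashMap from digest to effects stands in for the
    // real execution cache. Only the read paths the checkpoint builder exercises are
    // implemented; the rest are left unimplemented.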
    impl TransactionCacheRead for HashMap<TransactionDigest, TransactionEffects> {
        fn notify_read_executed_effects(
            &self,
            _: &str,
            digests: &[TransactionDigest],
        ) -> BoxFuture<'_, Vec<TransactionEffects>> {
            std::future::ready(
                digests
                    .iter()
                    .map(|d| self.get(d).expect("effects not found").clone())
                    .collect(),
            )
            .boxed()
        }

        fn notify_read_executed_effects_digests(
            &self,
            _: &str,
            digests: &[TransactionDigest],
        ) -> BoxFuture<'_, Vec<TransactionEffectsDigest>> {
            std::future::ready(
                digests
                    .iter()
                    .map(|d| {
                        self.get(d)
                            .map(|fx| fx.digest())
                            .expect("effects not found")
                    })
                    .collect(),
            )
            .boxed()
        }

        fn multi_get_executed_effects(
            &self,
            digests: &[TransactionDigest],
        ) -> Vec<Option<TransactionEffects>> {
            digests.iter().map(|d| self.get(d).cloned()).collect()
        }

        fn multi_get_transaction_blocks(
            &self,
            _: &[TransactionDigest],
        ) -> Vec<Option<Arc<VerifiedTransaction>>> {
            unimplemented!()
        }

        fn multi_get_executed_effects_digests(
            &self,
            _: &[TransactionDigest],
        ) -> Vec<Option<TransactionEffectsDigest>> {
            unimplemented!()
        }

        fn multi_get_effects(
            &self,
            _: &[TransactionEffectsDigest],
        ) -> Vec<Option<TransactionEffects>> {
            unimplemented!()
        }

        fn multi_get_events(&self, _: &[TransactionDigest]) -> Vec<Option<TransactionEvents>> {
            unimplemented!()
        }

        fn get_mysticeti_fastpath_outputs(
            &self,
            _: &TransactionDigest,
        ) -> Option<Arc<TransactionOutputs>> {
            unimplemented!()
        }

        fn notify_read_fastpath_transaction_outputs<'a>(
            &'a self,
            _: &'a [TransactionDigest],
        ) -> BoxFuture<'a, Vec<Arc<crate::transaction_outputs::TransactionOutputs>>> {
            unimplemented!()
        }

        fn take_accumulator_events(&self, _: &TransactionDigest) -> Option<Vec<AccumulatorEvent>> {
            unimplemented!()
        }

        fn get_unchanged_loaded_runtime_objects(
            &self,
            _digest: &TransactionDigest,
        ) -> Option<Vec<sui_types::storage::ObjectKey>> {
            unimplemented!()
        }

        fn transaction_executed_in_last_epoch(&self, _: &TransactionDigest, _: EpochId) -> bool {
            unimplemented!()
        }
    }

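    // Test CheckpointOutput: deliver locally built (contents, summary) pairs to the
    // test's channel.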
    #[async_trait::async_trait]
    impl CheckpointOutput for mpsc::Sender<(CheckpointContents, CheckpointSummary)> {
        async fn checkpoint_created(
            &self,
            summary: &CheckpointSummary,
            contents: &CheckpointContents,
            _epoch_store: &Arc<AuthorityPerEpochStore>,
            _checkpoint_store: &Arc<CheckpointStore>,
        ) -> SuiResult {
            self.try_send((contents.clone(), summary.clone())).unwrap();
            Ok(())
        }
    }

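    // Test CertifiedCheckpointOutput: deliver certified summaries to the test's channel.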
    #[async_trait::async_trait]
    impl CertifiedCheckpointOutput for mpsc::Sender<CertifiedCheckpointSummary> {
        async fn certified_checkpoint_created(
            &self,
            summary: &CertifiedCheckpointSummary,
        ) -> SuiResult {
            self.try_send(summary.clone()).unwrap();
            Ok(())
        }
    }

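    /// Builds a PendingCheckpoint at height `i` whose roots are the digests `d(t)` for
    /// each `t`, stamped with `timestamp_ms`.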
    fn p(i: u64, t: Vec<u8>, timestamp_ms: u64) -> PendingCheckpoint {
        PendingCheckpoint {
            roots: t
                .into_iter()
                .map(|t| TransactionKey::Digest(d(t)))
                .collect(),
            details: PendingCheckpointInfo {
                timestamp_ms,
                last_of_epoch: false,
                checkpoint_height: i,
                consensus_commit_ref: CommitRef::default(),
                rejected_transactions_digest: Digest::default(),
            },
        }
    }

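    /// Deterministic test digest: 32 zero bytes with the first byte set to `i`.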
    fn d(i: u8) -> TransactionDigest {
        let mut bytes: [u8; 32] = Default::default();
        bytes[0] = i;
        TransactionDigest::new(bytes)
    }

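    /// Builds TransactionEffects for `transaction_digest` with the given dependencies
    /// and gas summary.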
    fn e(
        transaction_digest: TransactionDigest,
        dependencies: Vec<TransactionDigest>,
        gas_used: GasCostSummary,
    ) -> TransactionEffects {
        let mut effects = TransactionEffects::default();
        *effects.transaction_digest_mut_for_testing() = transaction_digest;
        *effects.dependencies_mut_for_testing() = dependencies;
        *effects.gas_cost_summary_mut_for_testing() = gas_used;
        effects
    }

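    /// Stores synthetic effects for `digest` in the test effects map and marks the
    /// transaction as executed in the current epoch.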
    fn commit_cert_for_test(
        store: &mut HashMap<TransactionDigest, TransactionEffects>,
        state: Arc<AuthorityState>,
        digest: TransactionDigest,
        dependencies: Vec<TransactionDigest>,
        gas_used: GasCostSummary,
    ) {
        let epoch_store = state.epoch_store_for_testing();
        let effects = e(digest, dependencies, gas_used);
        store.insert(digest, effects.clone());
        epoch_store.insert_executed_in_epoch(&digest);
    }
}