mod causal_order;
pub mod checkpoint_executor;
mod checkpoint_output;
mod metrics;

use crate::accumulators::{self, AccumulatorSettlementTxBuilder};
use crate::authority::AuthorityState;
use crate::authority::epoch_start_configuration::EpochStartConfigTrait;
use crate::authority_client::{AuthorityAPI, make_network_authority_clients_with_network_config};
use crate::checkpoints::causal_order::CausalOrder;
use crate::checkpoints::checkpoint_output::{CertifiedCheckpointOutput, CheckpointOutput};
pub use crate::checkpoints::checkpoint_output::{
    LogCheckpointOutput, SendCheckpointToStateSync, SubmitCheckpointToConsensus,
};
pub use crate::checkpoints::metrics::CheckpointMetrics;
use crate::consensus_manager::ReplayWaiter;
use crate::execution_cache::TransactionCacheRead;

use crate::execution_scheduler::funds_withdraw_scheduler::FundsSettlement;
use crate::global_state_hasher::GlobalStateHasher;
use crate::stake_aggregator::{InsertResult, MultiStakeAggregator};
use consensus_core::CommitRef;
use diffy::create_patch;
use itertools::Itertools;
use mysten_common::random::get_rng;
use mysten_common::sync::notify_read::{CHECKPOINT_BUILDER_NOTIFY_READ_TASK_NAME, NotifyRead};
use mysten_common::{assert_reachable, debug_fatal, fatal, in_antithesis};
use mysten_metrics::{MonitoredFutureExt, monitored_scope, spawn_monitored_task};
use nonempty::NonEmpty;
use parking_lot::Mutex;
use pin_project_lite::pin_project;
use serde::{Deserialize, Serialize};
use sui_macros::fail_point_arg;
use sui_network::default_mysten_network_config;
use sui_types::SUI_ACCUMULATOR_ROOT_OBJECT_ID;
use sui_types::base_types::{ConciseableName, SequenceNumber};
use sui_types::executable_transaction::VerifiedExecutableTransaction;
use sui_types::execution::ExecutionTimeObservationKey;
use sui_types::messages_checkpoint::{
    CheckpointArtifacts, CheckpointCommitment, VersionedFullCheckpointContents,
};
use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait;
use tokio::sync::{mpsc, watch};
use typed_store::rocks::{DBOptions, ReadWriteOptions, default_db_options};

use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore;
use crate::authority::authority_store_pruner::PrunerWatermarks;
use crate::consensus_handler::SequencedConsensusTransactionKey;
use rand::seq::SliceRandom;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::fs::File;
use std::future::Future;
use std::io::Write;
use std::path::Path;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::Weak;
use std::task::{Context, Poll};
use std::time::{Duration, SystemTime};
use sui_protocol_config::ProtocolVersion;
use sui_types::base_types::{AuthorityName, EpochId, TransactionDigest};
use sui_types::committee::StakeUnit;
use sui_types::crypto::AuthorityStrongQuorumSignInfo;
use sui_types::digests::{
    CheckpointContentsDigest, CheckpointDigest, Digest, TransactionEffectsDigest,
};
use sui_types::effects::{TransactionEffects, TransactionEffectsAPI};
use sui_types::error::{SuiErrorKind, SuiResult};
use sui_types::gas::GasCostSummary;
use sui_types::message_envelope::Message;
use sui_types::messages_checkpoint::{
    CertifiedCheckpointSummary, CheckpointContents, CheckpointResponseV2, CheckpointSequenceNumber,
    CheckpointSignatureMessage, CheckpointSummary, CheckpointSummaryResponse, CheckpointTimestamp,
    EndOfEpochData, FullCheckpointContents, TrustedCheckpoint, VerifiedCheckpoint,
    VerifiedCheckpointContents,
};
use sui_types::messages_checkpoint::{CheckpointRequestV2, SignedCheckpointSummary};
use sui_types::messages_consensus::ConsensusTransactionKey;
use sui_types::signature::GenericSignature;
use sui_types::sui_system_state::{SuiSystemState, SuiSystemStateTrait};
use sui_types::transaction::{
    TransactionDataAPI, TransactionKey, TransactionKind, VerifiedTransaction,
};
use tokio::{sync::Notify, time::timeout};
use tracing::{debug, error, info, instrument, trace, warn};
use typed_store::DBMapUtils;
use typed_store::Map;
use typed_store::{
    TypedStoreError,
    rocks::{DBMap, MetricConf},
};

const TRANSACTION_FORK_DETECTED_KEY: u8 = 0;

pub type CheckpointHeight = u64;

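/// Aggregate statistics for one epoch, derived from the epoch's last checkpoint
/// (see `get_epoch_stats` below).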
pub struct EpochStats {
    pub checkpoint_count: u64,
    pub transaction_count: u64,
    pub total_gas_reward: u64,
}

#[derive(Clone, Debug)]
pub struct PendingCheckpointInfo {
    pub timestamp_ms: CheckpointTimestamp,
    pub last_of_epoch: bool,
    pub checkpoint_height: CheckpointHeight,
    pub consensus_commit_ref: CommitRef,
    pub rejected_transactions_digest: Digest,
}

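/// A set of transaction roots produced for one consensus commit, waiting to be
/// turned into a checkpoint by `CheckpointBuilder`.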
#[derive(Clone, Debug)]
pub struct PendingCheckpoint {
    pub roots: Vec<TransactionKey>,
    pub details: PendingCheckpointInfo,
}

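/// Builder-side record of a locally constructed checkpoint summary, persisted by the
/// builder in the per-epoch store together with the commit height and position it was
/// built from.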
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BuilderCheckpointSummary {
    pub summary: CheckpointSummary,
    pub checkpoint_height: Option<CheckpointHeight>,
    pub position_in_commit: usize,
}

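/// Column families backing `CheckpointStore`: checkpoint contents and full contents
/// keyed by digest / sequence number, certified and locally computed summaries, the
/// per-epoch last-checkpoint map, the small `watermarks` table keyed by
/// `CheckpointWatermark`, and the transaction fork-detection record.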
#[derive(DBMapUtils)]
#[cfg_attr(tidehunter, tidehunter)]
pub struct CheckpointStoreTables {
    pub(crate) checkpoint_content: DBMap<CheckpointContentsDigest, CheckpointContents>,

    pub(crate) checkpoint_sequence_by_contents_digest:
        DBMap<CheckpointContentsDigest, CheckpointSequenceNumber>,

    #[default_options_override_fn = "full_checkpoint_content_table_default_config"]
    full_checkpoint_content: DBMap<CheckpointSequenceNumber, FullCheckpointContents>,

    pub(crate) certified_checkpoints: DBMap<CheckpointSequenceNumber, TrustedCheckpoint>,
    pub(crate) checkpoint_by_digest: DBMap<CheckpointDigest, TrustedCheckpoint>,

    pub(crate) locally_computed_checkpoints: DBMap<CheckpointSequenceNumber, CheckpointSummary>,

    epoch_last_checkpoint_map: DBMap<EpochId, CheckpointSequenceNumber>,

    pub(crate) watermarks: DBMap<CheckpointWatermark, (CheckpointSequenceNumber, CheckpointDigest)>,

    pub(crate) transaction_fork_detected: DBMap<
        u8,
        (
            TransactionDigest,
            TransactionEffectsDigest,
            TransactionEffectsDigest,
        ),
    >,
    #[default_options_override_fn = "full_checkpoint_content_table_default_config"]
    full_checkpoint_content_v2: DBMap<CheckpointSequenceNumber, VersionedFullCheckpointContents>,
}

fn full_checkpoint_content_table_default_config() -> DBOptions {
    DBOptions {
        options: default_db_options().options,
        rw_options: ReadWriteOptions::default().set_log_value_hash(true),
    }
}

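// `CheckpointStoreTables::new` has two implementations below: the default RocksDB-backed
// variant and a tidehunter-backed variant selected by the `tidehunter` cfg, which also
// wires the pruner watermarks into per-table relocation filters.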
190impl CheckpointStoreTables {
191 #[cfg(not(tidehunter))]
192 pub fn new(path: &Path, metric_name: &'static str, _: Arc<PrunerWatermarks>) -> Self {
193 Self::open_tables_read_write(path.to_path_buf(), MetricConf::new(metric_name), None, None)
194 }
195
196 #[cfg(tidehunter)]
197 pub fn new(
198 path: &Path,
199 metric_name: &'static str,
200 pruner_watermarks: Arc<PrunerWatermarks>,
201 ) -> Self {
202 tracing::warn!("Checkpoint DB using tidehunter");
203 use crate::authority::authority_store_pruner::apply_relocation_filter;
204 use typed_store::tidehunter_util::{
205 Decision, KeySpaceConfig, KeyType, ThConfig, default_cells_per_mutex,
206 default_mutex_count, default_value_cache_size,
207 };
208 let mutexes = default_mutex_count() * 4;
209 let u64_sequence_key = KeyType::from_prefix_bits(6 * 8);
210 let override_dirty_keys_config = KeySpaceConfig::new()
211 .with_max_dirty_keys(64_000)
212 .with_value_cache_size(default_value_cache_size());
213 let config_u64 = ThConfig::new_with_config(
214 8,
215 mutexes,
216 u64_sequence_key,
217 override_dirty_keys_config.clone(),
218 );
219 let digest_config = ThConfig::new_with_rm_prefix(
220 32,
221 mutexes,
222 KeyType::uniform(default_cells_per_mutex()),
223 KeySpaceConfig::default(),
224 vec![0, 0, 0, 0, 0, 0, 0, 32],
225 );
226 let watermarks_config = KeySpaceConfig::new()
227 .with_value_cache_size(10)
228 .disable_unload();
229 let lru_config = KeySpaceConfig::new().with_value_cache_size(default_value_cache_size());
230 let configs = vec![
231 (
232 "checkpoint_content",
233 digest_config.clone().with_config(
234 lru_config
235 .clone()
236 .with_relocation_filter(|_, _| Decision::Remove),
237 ),
238 ),
239 (
240 "checkpoint_sequence_by_contents_digest",
241 digest_config.clone().with_config(apply_relocation_filter(
242 KeySpaceConfig::default(),
243 pruner_watermarks.checkpoint_id.clone(),
244 |sequence_number: CheckpointSequenceNumber| sequence_number,
245 false,
246 )),
247 ),
248 (
249 "full_checkpoint_content",
250 config_u64.clone().with_config(apply_relocation_filter(
251 override_dirty_keys_config.clone(),
252 pruner_watermarks.checkpoint_id.clone(),
253 |sequence_number: CheckpointSequenceNumber| sequence_number,
254 true,
255 )),
256 ),
257 ("certified_checkpoints", config_u64.clone()),
258 (
259 "checkpoint_by_digest",
260 digest_config.clone().with_config(apply_relocation_filter(
261 lru_config,
262 pruner_watermarks.epoch_id.clone(),
263 |checkpoint: TrustedCheckpoint| checkpoint.inner().epoch,
264 false,
265 )),
266 ),
267 (
268 "locally_computed_checkpoints",
269 config_u64.clone().with_config(apply_relocation_filter(
270 override_dirty_keys_config.clone(),
271 pruner_watermarks.checkpoint_id.clone(),
272 |checkpoint_id: CheckpointSequenceNumber| checkpoint_id,
273 true,
274 )),
275 ),
276 ("epoch_last_checkpoint_map", config_u64.clone()),
277 (
278 "watermarks",
279 ThConfig::new_with_config(4, 1, KeyType::uniform(1), watermarks_config.clone()),
280 ),
281 (
282 "transaction_fork_detected",
283 ThConfig::new_with_config(
284 1,
285 1,
286 KeyType::uniform(1),
287 watermarks_config.with_relocation_filter(|_, _| Decision::Remove),
288 ),
289 ),
290 (
291 "full_checkpoint_content_v2",
292 config_u64.clone().with_config(apply_relocation_filter(
293 override_dirty_keys_config.clone(),
294 pruner_watermarks.checkpoint_id.clone(),
295 |sequence_number: CheckpointSequenceNumber| sequence_number,
296 true,
297 )),
298 ),
299 ];
300 Self::open_tables_read_write(
301 path.to_path_buf(),
302 MetricConf::new(metric_name),
303 configs
304 .into_iter()
305 .map(|(cf, config)| (cf.to_string(), config))
306 .collect(),
307 )
308 }
309
310 pub fn open_readonly(path: &Path) -> CheckpointStoreTablesReadOnly {
311 Self::get_read_only_handle(
312 path.to_path_buf(),
313 None,
314 None,
315 MetricConf::new("checkpoint_readonly"),
316 )
317 }
318}
319
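/// Store wrapping the checkpoint tables plus notify-read registries used to wake tasks
/// that are waiting for a checkpoint to become synced or executed.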
pub struct CheckpointStore {
    pub(crate) tables: CheckpointStoreTables,
    synced_checkpoint_notify_read: NotifyRead<CheckpointSequenceNumber, VerifiedCheckpoint>,
    executed_checkpoint_notify_read: NotifyRead<CheckpointSequenceNumber, VerifiedCheckpoint>,
}

impl CheckpointStore {
    pub fn new(path: &Path, pruner_watermarks: Arc<PrunerWatermarks>) -> Arc<Self> {
        let tables = CheckpointStoreTables::new(path, "checkpoint", pruner_watermarks);
        Arc::new(Self {
            tables,
            synced_checkpoint_notify_read: NotifyRead::new(),
            executed_checkpoint_notify_read: NotifyRead::new(),
        })
    }

    pub fn new_for_tests() -> Arc<Self> {
        let ckpt_dir = mysten_common::tempdir().unwrap();
        CheckpointStore::new(ckpt_dir.path(), Arc::new(PrunerWatermarks::default()))
    }

    pub fn new_for_db_checkpoint_handler(path: &Path) -> Arc<Self> {
        let tables = CheckpointStoreTables::new(
            path,
            "db_checkpoint",
            Arc::new(PrunerWatermarks::default()),
        );
        Arc::new(Self {
            tables,
            synced_checkpoint_notify_read: NotifyRead::new(),
            executed_checkpoint_notify_read: NotifyRead::new(),
        })
    }

    pub fn open_readonly(path: &Path) -> CheckpointStoreTablesReadOnly {
        CheckpointStoreTables::open_readonly(path)
    }

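    // Typical setup (illustrative sketch, not tied to a specific caller): a node opens one
    // store and seeds it with the genesis checkpoint once, roughly:
    //
    //     let store = CheckpointStore::new_for_tests();
    //     store.insert_genesis_checkpoint(genesis_checkpoint, genesis_contents, &epoch_store);
    //
    // where `genesis_checkpoint`, `genesis_contents`, and `epoch_store` are assumed to come
    // from the node's genesis configuration.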
    #[instrument(level = "info", skip_all)]
    pub fn insert_genesis_checkpoint(
        &self,
        checkpoint: VerifiedCheckpoint,
        contents: CheckpointContents,
        epoch_store: &AuthorityPerEpochStore,
    ) {
        assert_eq!(
            checkpoint.epoch(),
            0,
            "can't call insert_genesis_checkpoint with a checkpoint not in epoch 0"
        );
        assert_eq!(
            *checkpoint.sequence_number(),
            0,
            "can't call insert_genesis_checkpoint with a checkpoint that doesn't have a sequence number of 0"
        );

        match self.get_checkpoint_by_sequence_number(0).unwrap() {
            Some(existing_checkpoint) => {
                assert_eq!(existing_checkpoint.digest(), checkpoint.digest())
            }
            None => {
                if epoch_store.epoch() == checkpoint.epoch {
                    epoch_store
                        .put_genesis_checkpoint_in_builder(checkpoint.data(), &contents)
                        .unwrap();
                } else {
                    debug!(
                        validator_epoch =% epoch_store.epoch(),
                        genesis_epoch =% checkpoint.epoch(),
                        "Not inserting checkpoint builder data for genesis checkpoint",
                    );
                }
                self.insert_checkpoint_contents(contents).unwrap();
                self.insert_verified_checkpoint(&checkpoint).unwrap();
                self.update_highest_synced_checkpoint(&checkpoint).unwrap();
            }
        }
    }

400 pub fn get_checkpoint_by_digest(
401 &self,
402 digest: &CheckpointDigest,
403 ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
404 self.tables
405 .checkpoint_by_digest
406 .get(digest)
407 .map(|maybe_checkpoint| maybe_checkpoint.map(|c| c.into()))
408 }
409
410 pub fn get_checkpoint_by_sequence_number(
411 &self,
412 sequence_number: CheckpointSequenceNumber,
413 ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
414 self.tables
415 .certified_checkpoints
416 .get(&sequence_number)
417 .map(|maybe_checkpoint| maybe_checkpoint.map(|c| c.into()))
418 }
419
420 pub fn get_locally_computed_checkpoint(
421 &self,
422 sequence_number: CheckpointSequenceNumber,
423 ) -> Result<Option<CheckpointSummary>, TypedStoreError> {
424 self.tables
425 .locally_computed_checkpoints
426 .get(&sequence_number)
427 }
428
429 pub fn multi_get_locally_computed_checkpoints(
430 &self,
431 sequence_numbers: &[CheckpointSequenceNumber],
432 ) -> Result<Vec<Option<CheckpointSummary>>, TypedStoreError> {
433 let checkpoints = self
434 .tables
435 .locally_computed_checkpoints
436 .multi_get(sequence_numbers)?;
437
438 Ok(checkpoints)
439 }
440
441 pub fn get_sequence_number_by_contents_digest(
442 &self,
443 digest: &CheckpointContentsDigest,
444 ) -> Result<Option<CheckpointSequenceNumber>, TypedStoreError> {
445 self.tables
446 .checkpoint_sequence_by_contents_digest
447 .get(digest)
448 }
449
450 pub fn delete_contents_digest_sequence_number_mapping(
451 &self,
452 digest: &CheckpointContentsDigest,
453 ) -> Result<(), TypedStoreError> {
454 self.tables
455 .checkpoint_sequence_by_contents_digest
456 .remove(digest)
457 }
458
459 pub fn get_latest_certified_checkpoint(
460 &self,
461 ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
462 Ok(self
463 .tables
464 .certified_checkpoints
465 .reversed_safe_iter_with_bounds(None, None)?
466 .next()
467 .transpose()?
468 .map(|(_, v)| v.into()))
469 }
470
471 pub fn get_latest_locally_computed_checkpoint(
472 &self,
473 ) -> Result<Option<CheckpointSummary>, TypedStoreError> {
474 Ok(self
475 .tables
476 .locally_computed_checkpoints
477 .reversed_safe_iter_with_bounds(None, None)?
478 .next()
479 .transpose()?
480 .map(|(_, v)| v))
481 }
482
483 pub fn multi_get_checkpoint_by_sequence_number(
484 &self,
485 sequence_numbers: &[CheckpointSequenceNumber],
486 ) -> Result<Vec<Option<VerifiedCheckpoint>>, TypedStoreError> {
487 let checkpoints = self
488 .tables
489 .certified_checkpoints
490 .multi_get(sequence_numbers)?
491 .into_iter()
492 .map(|maybe_checkpoint| maybe_checkpoint.map(|c| c.into()))
493 .collect();
494
495 Ok(checkpoints)
496 }
497
498 pub fn multi_get_checkpoint_content(
499 &self,
500 contents_digest: &[CheckpointContentsDigest],
501 ) -> Result<Vec<Option<CheckpointContents>>, TypedStoreError> {
502 self.tables.checkpoint_content.multi_get(contents_digest)
503 }
504
505 pub fn get_highest_verified_checkpoint(
506 &self,
507 ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
508 let highest_verified = if let Some(highest_verified) = self
509 .tables
510 .watermarks
511 .get(&CheckpointWatermark::HighestVerified)?
512 {
513 highest_verified
514 } else {
515 return Ok(None);
516 };
517 self.get_checkpoint_by_digest(&highest_verified.1)
518 }
519
520 pub fn get_highest_synced_checkpoint(
521 &self,
522 ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
523 let highest_synced = if let Some(highest_synced) = self
524 .tables
525 .watermarks
526 .get(&CheckpointWatermark::HighestSynced)?
527 {
528 highest_synced
529 } else {
530 return Ok(None);
531 };
532 self.get_checkpoint_by_digest(&highest_synced.1)
533 }
534
535 pub fn get_highest_synced_checkpoint_seq_number(
536 &self,
537 ) -> Result<Option<CheckpointSequenceNumber>, TypedStoreError> {
538 if let Some(highest_synced) = self
539 .tables
540 .watermarks
541 .get(&CheckpointWatermark::HighestSynced)?
542 {
543 Ok(Some(highest_synced.0))
544 } else {
545 Ok(None)
546 }
547 }
548
549 pub fn get_highest_executed_checkpoint_seq_number(
550 &self,
551 ) -> Result<Option<CheckpointSequenceNumber>, TypedStoreError> {
552 if let Some(highest_executed) = self
553 .tables
554 .watermarks
555 .get(&CheckpointWatermark::HighestExecuted)?
556 {
557 Ok(Some(highest_executed.0))
558 } else {
559 Ok(None)
560 }
561 }
562
563 pub fn get_highest_executed_checkpoint(
564 &self,
565 ) -> Result<Option<VerifiedCheckpoint>, TypedStoreError> {
566 let highest_executed = if let Some(highest_executed) = self
567 .tables
568 .watermarks
569 .get(&CheckpointWatermark::HighestExecuted)?
570 {
571 highest_executed
572 } else {
573 return Ok(None);
574 };
575 self.get_checkpoint_by_digest(&highest_executed.1)
576 }
577
578 pub fn get_highest_pruned_checkpoint_seq_number(
579 &self,
580 ) -> Result<Option<CheckpointSequenceNumber>, TypedStoreError> {
581 self.tables
582 .watermarks
583 .get(&CheckpointWatermark::HighestPruned)
584 .map(|watermark| watermark.map(|w| w.0))
585 }
586
587 pub fn get_checkpoint_contents(
588 &self,
589 digest: &CheckpointContentsDigest,
590 ) -> Result<Option<CheckpointContents>, TypedStoreError> {
591 self.tables.checkpoint_content.get(digest)
592 }
593
594 pub fn get_full_checkpoint_contents_by_sequence_number(
595 &self,
596 seq: CheckpointSequenceNumber,
597 ) -> Result<Option<VersionedFullCheckpointContents>, TypedStoreError> {
598 self.tables.full_checkpoint_content_v2.get(&seq)
599 }
600
601 fn prune_local_summaries(&self) -> SuiResult {
602 if let Some((last_local_summary, _)) = self
603 .tables
604 .locally_computed_checkpoints
605 .reversed_safe_iter_with_bounds(None, None)?
606 .next()
607 .transpose()?
608 {
609 let mut batch = self.tables.locally_computed_checkpoints.batch();
610 batch.schedule_delete_range(
611 &self.tables.locally_computed_checkpoints,
612 &0,
613 &last_local_summary,
614 )?;
615 batch.write()?;
616 info!("Pruned local summaries up to {:?}", last_local_summary);
617 }
618 Ok(())
619 }
620
621 pub fn clear_locally_computed_checkpoints_from(
622 &self,
623 from_seq: CheckpointSequenceNumber,
624 ) -> SuiResult {
625 let keys: Vec<_> = self
626 .tables
627 .locally_computed_checkpoints
628 .safe_iter_with_bounds(Some(from_seq), None)
629 .map(|r| r.map(|(k, _)| k))
630 .collect::<Result<_, _>>()?;
631 if let Some(&last_local_summary) = keys.last() {
632 let mut batch = self.tables.locally_computed_checkpoints.batch();
633 batch
634 .delete_batch(&self.tables.locally_computed_checkpoints, keys.iter())
635 .expect("Failed to delete locally computed checkpoints");
636 batch
637 .write()
638 .expect("Failed to delete locally computed checkpoints");
639 warn!(
640 from_seq,
641 last_local_summary,
642 "Cleared locally_computed_checkpoints from {} (inclusive) through {} (inclusive)",
643 from_seq,
644 last_local_summary
645 );
646 }
647 Ok(())
648 }
649
650 fn check_for_checkpoint_fork(
651 &self,
652 local_checkpoint: &CheckpointSummary,
653 verified_checkpoint: &VerifiedCheckpoint,
654 ) {
655 if local_checkpoint != verified_checkpoint.data() {
656 let verified_contents = self
657 .get_checkpoint_contents(&verified_checkpoint.content_digest)
658 .map(|opt_contents| {
659 opt_contents
660 .map(|contents| format!("{:?}", contents))
661 .unwrap_or_else(|| {
662 format!(
663 "Verified checkpoint contents not found, digest: {:?}",
664 verified_checkpoint.content_digest,
665 )
666 })
667 })
668 .map_err(|e| {
669 format!(
670 "Failed to get verified checkpoint contents, digest: {:?} error: {:?}",
671 verified_checkpoint.content_digest, e
672 )
673 })
674 .unwrap_or_else(|err_msg| err_msg);
675
676 let local_contents = self
677 .get_checkpoint_contents(&local_checkpoint.content_digest)
678 .map(|opt_contents| {
679 opt_contents
680 .map(|contents| format!("{:?}", contents))
681 .unwrap_or_else(|| {
682 format!(
683 "Local checkpoint contents not found, digest: {:?}",
684 local_checkpoint.content_digest
685 )
686 })
687 })
688 .map_err(|e| {
689 format!(
690 "Failed to get local checkpoint contents, digest: {:?} error: {:?}",
691 local_checkpoint.content_digest, e
692 )
693 })
694 .unwrap_or_else(|err_msg| err_msg);
695
696 error!(
698 verified_checkpoint = ?verified_checkpoint.data(),
699 ?verified_contents,
700 ?local_checkpoint,
701 ?local_contents,
702 "Local checkpoint fork detected!",
703 );
704
705 if let Err(e) = self.record_checkpoint_fork_detected(
707 *local_checkpoint.sequence_number(),
708 local_checkpoint.digest(),
709 ) {
710 error!("Failed to record checkpoint fork in database: {:?}", e);
711 }
712
713 fail_point_arg!(
714 "kill_checkpoint_fork_node",
715 |checkpoint_overrides: std::sync::Arc<
716 std::sync::Mutex<std::collections::BTreeMap<u64, String>>,
717 >| {
718 #[cfg(msim)]
719 {
720 if let Ok(mut overrides) = checkpoint_overrides.lock() {
721 overrides.insert(
722 local_checkpoint.sequence_number,
723 verified_checkpoint.digest().to_string(),
724 );
725 }
726 tracing::error!(
727 fatal = true,
728 "Fork recovery test: killing node due to checkpoint fork for sequence number: {}, using verified digest: {}",
729 local_checkpoint.sequence_number(),
730 verified_checkpoint.digest()
731 );
732 sui_simulator::task::shutdown_current_node();
733 }
734 }
735 );
736
737 fatal!(
738 "Local checkpoint fork detected for sequence number: {}",
739 local_checkpoint.sequence_number()
740 );
741 }
742 }
743
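    /// Writes a certified checkpoint to the store (keyed by sequence number and digest),
    /// records the last checkpoint of the epoch when the summary carries a next-epoch
    /// committee, and cross-checks it against any locally computed summary to detect forks.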
    pub fn insert_certified_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        debug!(
            checkpoint_seq = checkpoint.sequence_number(),
            "Inserting certified checkpoint",
        );
        let mut batch = self.tables.certified_checkpoints.batch();
        batch
            .insert_batch(
                &self.tables.certified_checkpoints,
                [(checkpoint.sequence_number(), checkpoint.serializable_ref())],
            )?
            .insert_batch(
                &self.tables.checkpoint_by_digest,
                [(checkpoint.digest(), checkpoint.serializable_ref())],
            )?;
        if checkpoint.next_epoch_committee().is_some() {
            batch.insert_batch(
                &self.tables.epoch_last_checkpoint_map,
                [(&checkpoint.epoch(), checkpoint.sequence_number())],
            )?;
        }
        batch.write()?;

        if let Some(local_checkpoint) = self
            .tables
            .locally_computed_checkpoints
            .get(checkpoint.sequence_number())?
        {
            self.check_for_checkpoint_fork(&local_checkpoint, checkpoint);
        }

        Ok(())
    }

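    /// Inserts a checkpoint that has been fully verified, then bumps the
    /// `HighestVerified` watermark if this checkpoint advances it.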
    #[instrument(level = "debug", skip_all)]
    pub fn insert_verified_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        self.insert_certified_checkpoint(checkpoint)?;
        self.update_highest_verified_checkpoint(checkpoint)
    }

    pub fn update_highest_verified_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        if Some(*checkpoint.sequence_number())
            > self
                .get_highest_verified_checkpoint()?
                .map(|x| *x.sequence_number())
        {
            debug!(
                checkpoint_seq = checkpoint.sequence_number(),
                "Updating highest verified checkpoint",
            );
            self.tables.watermarks.insert(
                &CheckpointWatermark::HighestVerified,
                &(*checkpoint.sequence_number(), *checkpoint.digest()),
            )?;
        }

        Ok(())
    }

    pub fn update_highest_synced_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        let seq = *checkpoint.sequence_number();
        debug!(checkpoint_seq = seq, "Updating highest synced checkpoint");
        self.tables.watermarks.insert(
            &CheckpointWatermark::HighestSynced,
            &(seq, *checkpoint.digest()),
        )?;
        self.synced_checkpoint_notify_read.notify(&seq, checkpoint);
        Ok(())
    }

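    /// Waits until the given watermark reaches `seq`, then returns the certified
    /// checkpoint at `seq`. The closure re-reads the watermark after registration so the
    /// availability check does not race with `notify`.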
    async fn notify_read_checkpoint_watermark<F>(
        &self,
        notify_read: &NotifyRead<CheckpointSequenceNumber, VerifiedCheckpoint>,
        seq: CheckpointSequenceNumber,
        get_watermark: F,
    ) -> VerifiedCheckpoint
    where
        F: Fn() -> Option<CheckpointSequenceNumber>,
    {
        notify_read
            .read("notify_read_checkpoint_watermark", &[seq], |seqs| {
                let seq = seqs[0];
                let Some(highest) = get_watermark() else {
                    return vec![None];
                };
                if highest < seq {
                    return vec![None];
                }
                let checkpoint = self
                    .get_checkpoint_by_sequence_number(seq)
                    .expect("db error")
                    .expect("checkpoint not found");
                vec![Some(checkpoint)]
            })
            .await
            .into_iter()
            .next()
            .unwrap()
    }

    pub async fn notify_read_synced_checkpoint(
        &self,
        seq: CheckpointSequenceNumber,
    ) -> VerifiedCheckpoint {
        self.notify_read_checkpoint_watermark(&self.synced_checkpoint_notify_read, seq, || {
            self.get_highest_synced_checkpoint_seq_number()
                .expect("db error")
        })
        .await
    }

    pub async fn notify_read_executed_checkpoint(
        &self,
        seq: CheckpointSequenceNumber,
    ) -> VerifiedCheckpoint {
        self.notify_read_checkpoint_watermark(&self.executed_checkpoint_notify_read, seq, || {
            self.get_highest_executed_checkpoint_seq_number()
                .expect("db error")
        })
        .await
    }

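    // Illustrative usage (sketch): a task that must wait for local execution of a
    // checkpoint can await the executed watermark, e.g.
    //
    //     let ckpt = checkpoint_store.notify_read_executed_checkpoint(seq).await;
    //
    // where `checkpoint_store: Arc<CheckpointStore>` and `seq` are assumed to be in scope.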
    pub fn update_highest_executed_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        if let Some(seq_number) = self.get_highest_executed_checkpoint_seq_number()? {
            if seq_number >= *checkpoint.sequence_number() {
                return Ok(());
            }
            assert_eq!(
                seq_number + 1,
                *checkpoint.sequence_number(),
                "Cannot update highest executed checkpoint to {} when current highest executed checkpoint is {}",
                checkpoint.sequence_number(),
                seq_number
            );
        }
        let seq = *checkpoint.sequence_number();
        debug!(checkpoint_seq = seq, "Updating highest executed checkpoint");
        self.tables.watermarks.insert(
            &CheckpointWatermark::HighestExecuted,
            &(seq, *checkpoint.digest()),
        )?;
        self.executed_checkpoint_notify_read
            .notify(&seq, checkpoint);
        Ok(())
    }

    pub fn update_highest_pruned_checkpoint(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        self.tables.watermarks.insert(
            &CheckpointWatermark::HighestPruned,
            &(*checkpoint.sequence_number(), *checkpoint.digest()),
        )
    }

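    /// Sets the `HighestExecuted` watermark directly, without the contiguity check
    /// performed by `update_highest_executed_checkpoint`; the "subtle" name signals that
    /// callers are expected to know the watermark may move non-sequentially.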
    pub fn set_highest_executed_checkpoint_subtle(
        &self,
        checkpoint: &VerifiedCheckpoint,
    ) -> Result<(), TypedStoreError> {
        self.tables.watermarks.insert(
            &CheckpointWatermark::HighestExecuted,
            &(*checkpoint.sequence_number(), *checkpoint.digest()),
        )
    }

936 pub fn insert_checkpoint_contents(
937 &self,
938 contents: CheckpointContents,
939 ) -> Result<(), TypedStoreError> {
940 debug!(
941 checkpoint_seq = ?contents.digest(),
942 "Inserting checkpoint contents",
943 );
944 self.tables
945 .checkpoint_content
946 .insert(contents.digest(), &contents)
947 }
948
949 pub fn insert_verified_checkpoint_contents(
950 &self,
951 checkpoint: &VerifiedCheckpoint,
952 full_contents: VerifiedCheckpointContents,
953 ) -> Result<(), TypedStoreError> {
954 let mut batch = self.tables.full_checkpoint_content_v2.batch();
955 batch.insert_batch(
956 &self.tables.checkpoint_sequence_by_contents_digest,
957 [(&checkpoint.content_digest, checkpoint.sequence_number())],
958 )?;
959 let full_contents = full_contents.into_inner();
960 batch.insert_batch(
961 &self.tables.full_checkpoint_content_v2,
962 [(checkpoint.sequence_number(), &full_contents)],
963 )?;
964
965 let contents = full_contents.into_checkpoint_contents();
966 assert_eq!(&checkpoint.content_digest, contents.digest());
967
968 batch.insert_batch(
969 &self.tables.checkpoint_content,
970 [(contents.digest(), &contents)],
971 )?;
972
973 batch.write()
974 }
975
976 pub fn delete_full_checkpoint_contents(
977 &self,
978 seq: CheckpointSequenceNumber,
979 ) -> Result<(), TypedStoreError> {
980 self.tables.full_checkpoint_content.remove(&seq)?;
981 self.tables.full_checkpoint_content_v2.remove(&seq)
982 }
983
984 pub fn get_epoch_last_checkpoint(
985 &self,
986 epoch_id: EpochId,
987 ) -> SuiResult<Option<VerifiedCheckpoint>> {
988 let seq = self.get_epoch_last_checkpoint_seq_number(epoch_id)?;
989 let checkpoint = match seq {
990 Some(seq) => self.get_checkpoint_by_sequence_number(seq)?,
991 None => None,
992 };
993 Ok(checkpoint)
994 }
995
996 pub fn get_epoch_last_checkpoint_seq_number(
997 &self,
998 epoch_id: EpochId,
999 ) -> SuiResult<Option<CheckpointSequenceNumber>> {
1000 let seq = self.tables.epoch_last_checkpoint_map.get(&epoch_id)?;
1001 Ok(seq)
1002 }
1003
1004 pub fn insert_epoch_last_checkpoint(
1005 &self,
1006 epoch_id: EpochId,
1007 checkpoint: &VerifiedCheckpoint,
1008 ) -> SuiResult {
1009 self.tables
1010 .epoch_last_checkpoint_map
1011 .insert(&epoch_id, checkpoint.sequence_number())?;
1012 Ok(())
1013 }
1014
1015 pub fn get_epoch_state_commitments(
1016 &self,
1017 epoch: EpochId,
1018 ) -> SuiResult<Option<Vec<CheckpointCommitment>>> {
1019 let commitments = self.get_epoch_last_checkpoint(epoch)?.map(|checkpoint| {
1020 checkpoint
1021 .end_of_epoch_data
1022 .as_ref()
1023 .expect("Last checkpoint of epoch expected to have EndOfEpochData")
1024 .epoch_commitments
1025 .clone()
1026 });
1027 Ok(commitments)
1028 }
1029
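    /// Computes per-epoch statistics from the epoch's last checkpoint, using the previous
    /// epoch's last checkpoint (if any) as the baseline. Returns `None` when that baseline
    /// cannot be loaded.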
    pub fn get_epoch_stats(
        &self,
        epoch: EpochId,
        last_checkpoint: &CheckpointSummary,
    ) -> Option<EpochStats> {
        let (first_checkpoint, prev_epoch_network_transactions) = if epoch == 0 {
            (0, 0)
        } else if let Ok(Some(checkpoint)) = self.get_epoch_last_checkpoint(epoch - 1) {
            (
                checkpoint.sequence_number + 1,
                checkpoint.network_total_transactions,
            )
        } else {
            return None;
        };
        Some(EpochStats {
            checkpoint_count: last_checkpoint.sequence_number - first_checkpoint + 1,
            transaction_count: last_checkpoint.network_total_transactions
                - prev_epoch_network_transactions,
            total_gas_reward: last_checkpoint
                .epoch_rolling_gas_cost_summary
                .computation_cost,
        })
    }

1056 pub fn checkpoint_db(&self, path: &Path) -> SuiResult {
1057 self.tables
1059 .checkpoint_content
1060 .checkpoint_db(path)
1061 .map_err(Into::into)
1062 }
1063
1064 pub fn delete_highest_executed_checkpoint_test_only(&self) -> Result<(), TypedStoreError> {
1065 let mut wb = self.tables.watermarks.batch();
1066 wb.delete_batch(
1067 &self.tables.watermarks,
1068 std::iter::once(CheckpointWatermark::HighestExecuted),
1069 )?;
1070 wb.write()?;
1071 Ok(())
1072 }
1073
1074 pub fn reset_db_for_execution_since_genesis(&self) -> SuiResult {
1075 self.delete_highest_executed_checkpoint_test_only()?;
1076 Ok(())
1077 }
1078
1079 pub fn record_checkpoint_fork_detected(
1080 &self,
1081 checkpoint_seq: CheckpointSequenceNumber,
1082 checkpoint_digest: CheckpointDigest,
1083 ) -> Result<(), TypedStoreError> {
1084 info!(
1085 checkpoint_seq = checkpoint_seq,
1086 checkpoint_digest = ?checkpoint_digest,
1087 "Recording checkpoint fork detection in database"
1088 );
1089 self.tables.watermarks.insert(
1090 &CheckpointWatermark::CheckpointForkDetected,
1091 &(checkpoint_seq, checkpoint_digest),
1092 )
1093 }
1094
1095 pub fn get_checkpoint_fork_detected(
1096 &self,
1097 ) -> Result<Option<(CheckpointSequenceNumber, CheckpointDigest)>, TypedStoreError> {
1098 self.tables
1099 .watermarks
1100 .get(&CheckpointWatermark::CheckpointForkDetected)
1101 }
1102
1103 pub fn clear_checkpoint_fork_detected(&self) -> Result<(), TypedStoreError> {
1104 self.tables
1105 .watermarks
1106 .remove(&CheckpointWatermark::CheckpointForkDetected)
1107 }
1108
1109 pub fn record_transaction_fork_detected(
1110 &self,
1111 tx_digest: TransactionDigest,
1112 expected_effects_digest: TransactionEffectsDigest,
1113 actual_effects_digest: TransactionEffectsDigest,
1114 ) -> Result<(), TypedStoreError> {
1115 info!(
1116 tx_digest = ?tx_digest,
1117 expected_effects_digest = ?expected_effects_digest,
1118 actual_effects_digest = ?actual_effects_digest,
1119 "Recording transaction fork detection in database"
1120 );
1121 self.tables.transaction_fork_detected.insert(
1122 &TRANSACTION_FORK_DETECTED_KEY,
1123 &(tx_digest, expected_effects_digest, actual_effects_digest),
1124 )
1125 }
1126
1127 pub fn get_transaction_fork_detected(
1128 &self,
1129 ) -> Result<
1130 Option<(
1131 TransactionDigest,
1132 TransactionEffectsDigest,
1133 TransactionEffectsDigest,
1134 )>,
1135 TypedStoreError,
1136 > {
1137 self.tables
1138 .transaction_fork_detected
1139 .get(&TRANSACTION_FORK_DETECTED_KEY)
1140 }
1141
1142 pub fn clear_transaction_fork_detected(&self) -> Result<(), TypedStoreError> {
1143 self.tables
1144 .transaction_fork_detected
1145 .remove(&TRANSACTION_FORK_DETECTED_KEY)
1146 }
1147}
1148
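/// Keys of the `watermarks` table. Each variant maps to a single
/// `(CheckpointSequenceNumber, CheckpointDigest)` entry.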
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
pub enum CheckpointWatermark {
    HighestVerified,
    HighestSynced,
    HighestExecuted,
    HighestPruned,
    CheckpointForkDetected,
}

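/// Receives `(sequence number, effects)` batches from the checkpoint builder and feeds
/// them to the `GlobalStateHasher`; stops once the hasher has been dropped.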
struct CheckpointStateHasher {
    epoch_store: Arc<AuthorityPerEpochStore>,
    hasher: Weak<GlobalStateHasher>,
    receive_from_builder: mpsc::Receiver<(CheckpointSequenceNumber, Vec<TransactionEffects>)>,
}

impl CheckpointStateHasher {
    fn new(
        epoch_store: Arc<AuthorityPerEpochStore>,
        hasher: Weak<GlobalStateHasher>,
        receive_from_builder: mpsc::Receiver<(CheckpointSequenceNumber, Vec<TransactionEffects>)>,
    ) -> Self {
        Self {
            epoch_store,
            hasher,
            receive_from_builder,
        }
    }

    async fn run(self) {
        let Self {
            epoch_store,
            hasher,
            mut receive_from_builder,
        } = self;
        while let Some((seq, effects)) = receive_from_builder.recv().await {
            let Some(hasher) = hasher.upgrade() else {
                info!("Object state hasher was dropped, stopping checkpoint accumulation");
                break;
            };
            hasher
                .accumulate_checkpoint(&effects, seq, &epoch_store)
                .expect("epoch ended while accumulating checkpoint");
        }
    }
}

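/// Builder errors are either terminal for the epoch (`ChangeEpochTxAlreadyExecuted`,
/// `SystemPackagesMissing`), which stop the builder task, or transient (`Retry`), which
/// the run loop retries after a short delay.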
#[derive(Debug)]
pub(crate) enum CheckpointBuilderError {
    ChangeEpochTxAlreadyExecuted,
    SystemPackagesMissing,
    Retry(anyhow::Error),
}

impl<SuiError: std::error::Error + Send + Sync + 'static> From<SuiError>
    for CheckpointBuilderError
{
    fn from(e: SuiError) -> Self {
        Self::Retry(e.into())
    }
}

pub(crate) type CheckpointBuilderResult<T = ()> = Result<T, CheckpointBuilderError>;

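/// Turns pending checkpoints produced for consensus commits into
/// `CheckpointSummary`/`CheckpointContents` pairs, persists them locally, and hands them
/// to the configured `CheckpointOutput` (waking the aggregator via `notify_aggregator`).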
pub struct CheckpointBuilder {
    state: Arc<AuthorityState>,
    store: Arc<CheckpointStore>,
    epoch_store: Arc<AuthorityPerEpochStore>,
    notify: Arc<Notify>,
    notify_aggregator: Arc<Notify>,
    last_built: watch::Sender<CheckpointSequenceNumber>,
    effects_store: Arc<dyn TransactionCacheRead>,
    global_state_hasher: Weak<GlobalStateHasher>,
    send_to_hasher: mpsc::Sender<(CheckpointSequenceNumber, Vec<TransactionEffects>)>,
    output: Box<dyn CheckpointOutput>,
    metrics: Arc<CheckpointMetrics>,
    max_transactions_per_checkpoint: usize,
    max_checkpoint_size_bytes: usize,
}

pub struct CheckpointAggregator {
    store: Arc<CheckpointStore>,
    epoch_store: Arc<AuthorityPerEpochStore>,
    notify: Arc<Notify>,
    current: Option<CheckpointSignatureAggregator>,
    output: Box<dyn CertifiedCheckpointOutput>,
    state: Arc<AuthorityState>,
    metrics: Arc<CheckpointMetrics>,
}

pub struct CheckpointSignatureAggregator {
    next_index: u64,
    summary: CheckpointSummary,
    digest: CheckpointDigest,
    signatures_by_digest: MultiStakeAggregator<CheckpointDigest, CheckpointSummary, true>,
    store: Arc<CheckpointStore>,
    state: Arc<AuthorityState>,
    metrics: Arc<CheckpointMetrics>,
}

1250impl CheckpointBuilder {
1251 fn new(
1252 state: Arc<AuthorityState>,
1253 store: Arc<CheckpointStore>,
1254 epoch_store: Arc<AuthorityPerEpochStore>,
1255 notify: Arc<Notify>,
1256 effects_store: Arc<dyn TransactionCacheRead>,
1257 global_state_hasher: Weak<GlobalStateHasher>,
1259 send_to_hasher: mpsc::Sender<(CheckpointSequenceNumber, Vec<TransactionEffects>)>,
1261 output: Box<dyn CheckpointOutput>,
1262 notify_aggregator: Arc<Notify>,
1263 last_built: watch::Sender<CheckpointSequenceNumber>,
1264 metrics: Arc<CheckpointMetrics>,
1265 max_transactions_per_checkpoint: usize,
1266 max_checkpoint_size_bytes: usize,
1267 ) -> Self {
1268 Self {
1269 state,
1270 store,
1271 epoch_store,
1272 notify,
1273 effects_store,
1274 global_state_hasher,
1275 send_to_hasher,
1276 output,
1277 notify_aggregator,
1278 last_built,
1279 metrics,
1280 max_transactions_per_checkpoint,
1281 max_checkpoint_size_bytes,
1282 }
1283 }
1284
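    /// Main builder loop: optionally waits for consensus replay to finish, then keeps
    /// building checkpoints. Terminal errors stop the task; `Retry` errors are logged and
    /// retried after one second.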
    async fn run(mut self, consensus_replay_waiter: Option<ReplayWaiter>) {
        if let Some(replay_waiter) = consensus_replay_waiter {
            info!("Waiting for consensus commits to replay ...");
            replay_waiter.wait_for_replay().await;
            info!("Consensus commits finished replaying");
        }
        info!("Starting CheckpointBuilder");
        loop {
            match self.maybe_build_checkpoints().await {
                Ok(()) => {}
                err @ Err(
                    CheckpointBuilderError::ChangeEpochTxAlreadyExecuted
                    | CheckpointBuilderError::SystemPackagesMissing,
                ) => {
                    info!("CheckpointBuilder stopping: {:?}", err);
                    return;
                }
                Err(CheckpointBuilderError::Retry(inner)) => {
                    let msg = format!("{:?}", inner);
                    debug_fatal!("Error while making checkpoint, will retry in 1s: {}", msg);
                    tokio::time::sleep(Duration::from_secs(1)).await;
                    self.metrics.checkpoint_errors.inc();
                    continue;
                }
            }

            self.notify.notified().await;
        }
    }

1322 async fn maybe_build_checkpoints(&mut self) -> CheckpointBuilderResult {
1323 let _scope = monitored_scope("BuildCheckpoints");
1324
1325 let summary = self
1327 .epoch_store
1328 .last_built_checkpoint_builder_summary()
1329 .expect("epoch should not have ended");
1330 let mut last_height = summary.clone().and_then(|s| s.checkpoint_height);
1331 let mut last_timestamp = summary.map(|s| s.summary.timestamp_ms);
1332
1333 let min_checkpoint_interval_ms = self
1334 .epoch_store
1335 .protocol_config()
1336 .min_checkpoint_interval_ms_as_option()
1337 .unwrap_or_default();
1338 let mut grouped_pending_checkpoints = Vec::new();
1339 let mut checkpoints_iter = self
1340 .epoch_store
1341 .get_pending_checkpoints(last_height)
1342 .expect("unexpected epoch store error")
1343 .into_iter()
1344 .peekable();
1345 while let Some((height, pending)) = checkpoints_iter.next() {
1346 let current_timestamp = pending.details().timestamp_ms;
1349 let can_build = match last_timestamp {
1350 Some(last_timestamp) => {
1351 current_timestamp >= last_timestamp + min_checkpoint_interval_ms
1352 }
1353 None => true,
1354 } || checkpoints_iter
1357 .peek()
1358 .is_some_and(|(_, next_pending)| next_pending.details().last_of_epoch)
1359 || pending.details().last_of_epoch;
1361 grouped_pending_checkpoints.push(pending);
1362 if !can_build {
1363 debug!(
1364 checkpoint_commit_height = height,
1365 ?last_timestamp,
1366 ?current_timestamp,
1367 "waiting for more PendingCheckpoints: minimum interval not yet elapsed"
1368 );
1369 continue;
1370 }
1371
1372 last_height = Some(height);
1374 last_timestamp = Some(current_timestamp);
1375 debug!(
1376 checkpoint_commit_height_from = grouped_pending_checkpoints
1377 .first()
1378 .unwrap()
1379 .details()
1380 .checkpoint_height,
1381 checkpoint_commit_height_to = last_height,
1382 "Making checkpoint with commit height range"
1383 );
1384
1385 let seq = self
1386 .make_checkpoint(std::mem::take(&mut grouped_pending_checkpoints))
1387 .await?;
1388
1389 self.last_built.send_if_modified(|cur| {
1390 if seq > *cur {
1392 *cur = seq;
1393 true
1394 } else {
1395 false
1396 }
1397 });
1398
1399 tokio::task::yield_now().await;
1402 }
1403 debug!(
1404 "Waiting for more checkpoints from consensus after processing {last_height:?}; {} pending checkpoints left unprocessed until next interval",
1405 grouped_pending_checkpoints.len(),
1406 );
1407
1408 Ok(())
1409 }
1410
1411 #[instrument(level = "debug", skip_all, fields(last_height = pendings.last().unwrap().details().checkpoint_height))]
1412 async fn make_checkpoint(
1413 &mut self,
1414 pendings: Vec<PendingCheckpoint>,
1415 ) -> CheckpointBuilderResult<CheckpointSequenceNumber> {
1416 let _scope = monitored_scope("CheckpointBuilder::make_checkpoint");
1417
1418 let pending_ckpt_str = pendings
1419 .iter()
1420 .map(|p| {
1421 format!(
1422 "height={}, commit={}",
1423 p.details().checkpoint_height,
1424 p.details().consensus_commit_ref
1425 )
1426 })
1427 .join("; ");
1428
1429 let last_details = pendings.last().unwrap().details().clone();
1430
1431 let highest_executed_sequence = self
1434 .store
1435 .get_highest_executed_checkpoint_seq_number()
1436 .expect("db error")
1437 .unwrap_or(0);
1438
1439 let (poll_count, result) = poll_count(self.resolve_checkpoint_transactions(pendings)).await;
1440 let (sorted_tx_effects_included_in_checkpoint, all_roots) = result?;
1441
1442 let new_checkpoints = self
1443 .create_checkpoints(
1444 sorted_tx_effects_included_in_checkpoint,
1445 &last_details,
1446 &all_roots,
1447 )
1448 .await?;
1449 let highest_sequence = *new_checkpoints.last().0.sequence_number();
1450 if highest_sequence <= highest_executed_sequence && poll_count > 1 {
1451 debug_fatal!(
1452 "resolve_checkpoint_transactions should be instantaneous when executed checkpoint is ahead of checkpoint builder"
1453 );
1454 }
1455
1456 let new_ckpt_str = new_checkpoints
1457 .iter()
1458 .map(|(ckpt, _)| format!("seq={}, digest={}", ckpt.sequence_number(), ckpt.digest()))
1459 .join("; ");
1460
1461 self.write_checkpoints(last_details.checkpoint_height, new_checkpoints)
1462 .await?;
1463 info!(
1464 "Made new checkpoint {} from pending checkpoint {}",
1465 new_ckpt_str, pending_ckpt_str
1466 );
1467
1468 Ok(highest_sequence)
1469 }
1470
1471 async fn construct_and_execute_settlement_transactions(
1472 &self,
1473 sorted_tx_effects_included_in_checkpoint: &[TransactionEffects],
1474 checkpoint_height: CheckpointHeight,
1475 checkpoint_seq: CheckpointSequenceNumber,
1476 tx_index_offset: u64,
1477 ) -> (TransactionKey, Vec<TransactionEffects>) {
1478 let _scope =
1479 monitored_scope("CheckpointBuilder::construct_and_execute_settlement_transactions");
1480
1481 let tx_key =
1482 TransactionKey::AccumulatorSettlement(self.epoch_store.epoch(), checkpoint_height);
1483
1484 let epoch = self.epoch_store.epoch();
1485 let accumulator_root_obj_initial_shared_version = self
1486 .epoch_store
1487 .epoch_start_config()
1488 .accumulator_root_obj_initial_shared_version()
1489 .expect("accumulator root object must exist");
1490
1491 let builder = AccumulatorSettlementTxBuilder::new(
1492 Some(self.effects_store.as_ref()),
1493 sorted_tx_effects_included_in_checkpoint,
1494 checkpoint_seq,
1495 tx_index_offset,
1496 );
1497
1498 let funds_changes = builder.collect_funds_changes();
1499 let num_updates = builder.num_updates();
1500 let settlement_txns = builder.build_tx(
1501 self.epoch_store.protocol_config(),
1502 epoch,
1503 accumulator_root_obj_initial_shared_version,
1504 checkpoint_height,
1505 checkpoint_seq,
1506 );
1507
1508 let settlement_txns: Vec<_> = settlement_txns
1509 .into_iter()
1510 .map(|tx| {
1511 VerifiedExecutableTransaction::new_system(
1512 VerifiedTransaction::new_system_transaction(tx),
1513 self.epoch_store.epoch(),
1514 )
1515 })
1516 .collect();
1517
1518 let settlement_digests: Vec<_> = settlement_txns.iter().map(|tx| *tx.digest()).collect();
1519
1520 debug!(
1521 ?settlement_digests,
1522 ?tx_key,
1523 "created settlement transactions with {num_updates} updates"
1524 );
1525
1526 self.epoch_store
1527 .notify_settlement_transactions_ready(tx_key, settlement_txns);
1528
1529 let settlement_effects = wait_for_effects_with_retry(
1530 self.effects_store.as_ref(),
1531 "CheckpointBuilder::notify_read_settlement_effects",
1532 &settlement_digests,
1533 tx_key,
1534 )
1535 .await;
1536
1537 let barrier_tx = accumulators::build_accumulator_barrier_tx(
1538 epoch,
1539 accumulator_root_obj_initial_shared_version,
1540 checkpoint_height,
1541 &settlement_effects,
1542 );
1543
1544 let barrier_tx = VerifiedExecutableTransaction::new_system(
1545 VerifiedTransaction::new_system_transaction(barrier_tx),
1546 self.epoch_store.epoch(),
1547 );
1548 let barrier_digest = *barrier_tx.digest();
1549
1550 self.epoch_store
1551 .notify_barrier_transaction_ready(tx_key, barrier_tx);
1552
1553 let barrier_effects = wait_for_effects_with_retry(
1554 self.effects_store.as_ref(),
1555 "CheckpointBuilder::notify_read_barrier_effects",
1556 &[barrier_digest],
1557 tx_key,
1558 )
1559 .await;
1560
1561 let settlement_effects: Vec<_> = settlement_effects
1562 .into_iter()
1563 .chain(barrier_effects)
1564 .collect();
1565
1566 let mut next_accumulator_version = None;
1567 for fx in settlement_effects.iter() {
1568 assert!(
1569 fx.status().is_ok(),
1570 "settlement transaction cannot fail (digest: {:?}) {:#?}",
1571 fx.transaction_digest(),
1572 fx
1573 );
1574 if let Some(version) = fx
1575 .mutated()
1576 .iter()
1577 .find_map(|(oref, _)| (oref.0 == SUI_ACCUMULATOR_ROOT_OBJECT_ID).then_some(oref.1))
1578 {
1579 assert!(
1580 next_accumulator_version.is_none(),
1581 "Only one settlement transaction should mutate the accumulator root object"
1582 );
1583 next_accumulator_version = Some(version);
1584 }
1585 }
1586 let settlements = FundsSettlement {
1587 next_accumulator_version: next_accumulator_version
1588 .expect("Accumulator root object should be mutated in the settlement transactions"),
1589 funds_changes,
1590 };
1591
1592 self.state
1593 .execution_scheduler()
1594 .settle_address_funds(settlements);
1595
1596 (tx_key, settlement_effects)
1597 }
1598
1599 #[instrument(level = "debug", skip_all)]
1604 async fn resolve_checkpoint_transactions(
1605 &self,
1606 pending_checkpoints: Vec<PendingCheckpoint>,
1607 ) -> SuiResult<(Vec<TransactionEffects>, HashSet<TransactionDigest>)> {
1608 let _scope = monitored_scope("CheckpointBuilder::resolve_checkpoint_transactions");
1609
1610 let mut effects_in_current_checkpoint = BTreeSet::new();
1615
1616 let mut tx_effects = Vec::new();
1617 let mut tx_roots = HashSet::new();
1618
1619 for pending_checkpoint in pending_checkpoints.into_iter() {
1620 let mut pending = pending_checkpoint;
1621 debug!(
1622 checkpoint_commit_height = pending.details.checkpoint_height,
1623 "Resolving checkpoint transactions for pending checkpoint.",
1624 );
1625
1626 trace!(
1627 "roots for pending checkpoint {:?}: {:?}",
1628 pending.details.checkpoint_height, pending.roots,
1629 );
1630
1631 let settlement_root = if self.epoch_store.accumulators_enabled() {
1632 let Some(settlement_root @ TransactionKey::AccumulatorSettlement(..)) =
1633 pending.roots.pop()
1634 else {
1635 fatal!("No settlement root found");
1636 };
1637 Some(settlement_root)
1638 } else {
1639 None
1640 };
1641
1642 let roots = &pending.roots;
1643
1644 self.metrics
1645 .checkpoint_roots_count
1646 .inc_by(roots.len() as u64);
1647
1648 let root_digests = self
1649 .epoch_store
1650 .notify_read_tx_key_to_digest(roots)
1651 .in_monitored_scope("CheckpointNotifyDigests")
1652 .await?;
1653 let root_effects = self
1654 .effects_store
1655 .notify_read_executed_effects(
1656 CHECKPOINT_BUILDER_NOTIFY_READ_TASK_NAME,
1657 &root_digests,
1658 )
1659 .in_monitored_scope("CheckpointNotifyRead")
1660 .await;
1661
1662 assert!(
1663 self.epoch_store
1664 .protocol_config()
1665 .prepend_prologue_tx_in_consensus_commit_in_checkpoints()
1666 );
1667
1668 let consensus_commit_prologue =
1671 self.extract_consensus_commit_prologue(&root_digests, &root_effects)?;
1672
1673 if let Some((ccp_digest, ccp_effects)) = &consensus_commit_prologue {
1676 let unsorted_ccp = self.complete_checkpoint_effects(
1677 vec![ccp_effects.clone()],
1678 &mut effects_in_current_checkpoint,
1679 )?;
1680
1681 if unsorted_ccp.len() != 1 {
1684 fatal!(
1685 "Expected 1 consensus commit prologue, got {:?}",
1686 unsorted_ccp
1687 .iter()
1688 .map(|e| e.transaction_digest())
1689 .collect::<Vec<_>>()
1690 );
1691 }
1692 assert_eq!(unsorted_ccp.len(), 1);
1693 assert_eq!(unsorted_ccp[0].transaction_digest(), ccp_digest);
1694 }
1695
1696 let unsorted =
1697 self.complete_checkpoint_effects(root_effects, &mut effects_in_current_checkpoint)?;
1698
1699 let _scope = monitored_scope("CheckpointBuilder::causal_sort");
1700 let mut sorted: Vec<TransactionEffects> = Vec::with_capacity(unsorted.len() + 1);
1701 if let Some((ccp_digest, ccp_effects)) = consensus_commit_prologue {
1702 if cfg!(debug_assertions) {
1703 for tx in unsorted.iter() {
1705 assert!(tx.transaction_digest() != &ccp_digest);
1706 }
1707 }
1708 sorted.push(ccp_effects);
1709 }
1710 sorted.extend(CausalOrder::causal_sort(unsorted));
1711
1712 if let Some(settlement_root) = settlement_root {
1713 let last_checkpoint =
1716 Self::load_last_built_checkpoint_summary(&self.epoch_store, &self.store)?;
1717 let next_checkpoint_seq = last_checkpoint
1718 .as_ref()
1719 .map(|(seq, _)| *seq)
1720 .unwrap_or_default()
1721 + 1;
1722 let tx_index_offset = tx_effects.len() as u64;
1723
1724 let (tx_key, settlement_effects) = self
1725 .construct_and_execute_settlement_transactions(
1726 &sorted,
1727 pending.details.checkpoint_height,
1728 next_checkpoint_seq,
1729 tx_index_offset,
1730 )
1731 .await;
1732 debug!(?tx_key, "executed settlement transactions");
1733
1734 assert_eq!(settlement_root, tx_key);
1735
1736 sorted.extend(settlement_effects);
1743 }
1744
1745 #[cfg(msim)]
1746 {
1747 self.expensive_consensus_commit_prologue_invariants_check(&root_digests, &sorted);
1749 }
1750
1751 tx_effects.extend(sorted);
1752 tx_roots.extend(root_digests);
1753 }
1754
1755 Ok((tx_effects, tx_roots))
1756 }
1757
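    /// If the first root transaction of the commit is the consensus commit prologue,
    /// returns its digest and effects so the caller can place it first in the checkpoint.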
    fn extract_consensus_commit_prologue(
        &self,
        root_digests: &[TransactionDigest],
        root_effects: &[TransactionEffects],
    ) -> SuiResult<Option<(TransactionDigest, TransactionEffects)>> {
        let _scope = monitored_scope("CheckpointBuilder::extract_consensus_commit_prologue");
        if root_digests.is_empty() {
            return Ok(None);
        }

        let first_tx = self
            .state
            .get_transaction_cache_reader()
            .get_transaction_block(&root_digests[0])
            .expect("Transaction block must exist");

        Ok(first_tx
            .transaction_data()
            .is_consensus_commit_prologue()
            .then(|| {
                assert_eq!(first_tx.digest(), root_effects[0].transaction_digest());
                (*first_tx.digest(), root_effects[0].clone())
            }))
    }

1791 #[instrument(level = "debug", skip_all)]
1792 async fn write_checkpoints(
1793 &mut self,
1794 height: CheckpointHeight,
1795 new_checkpoints: NonEmpty<(CheckpointSummary, CheckpointContents)>,
1796 ) -> SuiResult {
1797 let _scope = monitored_scope("CheckpointBuilder::write_checkpoints");
1798 let mut batch = self.store.tables.checkpoint_content.batch();
1799 let mut all_tx_digests =
1800 Vec::with_capacity(new_checkpoints.iter().map(|(_, c)| c.size()).sum());
1801
1802 for (summary, contents) in &new_checkpoints {
1803 debug!(
1804 checkpoint_commit_height = height,
1805 checkpoint_seq = summary.sequence_number,
1806 contents_digest = ?contents.digest(),
1807 "writing checkpoint",
1808 );
1809
1810 if let Some(previously_computed_summary) = self
1811 .store
1812 .tables
1813 .locally_computed_checkpoints
1814 .get(&summary.sequence_number)?
1815 && previously_computed_summary.digest() != summary.digest()
1816 {
1817 fatal!(
1818 "Checkpoint {} was previously built with a different result: previously_computed_summary {:?} vs current_summary {:?}",
1819 summary.sequence_number,
1820 previously_computed_summary.digest(),
1821 summary.digest()
1822 );
1823 }
1824
1825 all_tx_digests.extend(contents.iter().map(|digests| digests.transaction));
1826
1827 self.metrics
1828 .transactions_included_in_checkpoint
1829 .inc_by(contents.size() as u64);
1830 let sequence_number = summary.sequence_number;
1831 self.metrics
1832 .last_constructed_checkpoint
1833 .set(sequence_number as i64);
1834
1835 batch.insert_batch(
1836 &self.store.tables.checkpoint_content,
1837 [(contents.digest(), contents)],
1838 )?;
1839
1840 batch.insert_batch(
1841 &self.store.tables.locally_computed_checkpoints,
1842 [(sequence_number, summary)],
1843 )?;
1844 }
1845
1846 batch.write()?;
1847
1848 for (summary, contents) in &new_checkpoints {
1850 self.output
1851 .checkpoint_created(summary, contents, &self.epoch_store, &self.store)
1852 .await?;
1853 }
1854
1855 for (local_checkpoint, _) in &new_checkpoints {
1856 if let Some(certified_checkpoint) = self
1857 .store
1858 .tables
1859 .certified_checkpoints
1860 .get(local_checkpoint.sequence_number())?
1861 {
1862 self.store
1863 .check_for_checkpoint_fork(local_checkpoint, &certified_checkpoint.into());
1864 }
1865 }
1866
1867 self.notify_aggregator.notify_one();
1868 self.epoch_store
1869 .process_constructed_checkpoint(height, new_checkpoints);
1870 Ok(())
1871 }
1872
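    /// Splits the effects (plus their user signatures) into chunks so that each resulting
    /// checkpoint respects `max_transactions_per_checkpoint` and `max_checkpoint_size_bytes`.
    /// A single oversized transaction is allowed through on its own, and at least one
    /// (possibly empty) chunk is always produced.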
    #[allow(clippy::type_complexity)]
    fn split_checkpoint_chunks(
        &self,
        effects_and_transaction_sizes: Vec<(TransactionEffects, usize)>,
        signatures: Vec<Vec<(GenericSignature, Option<SequenceNumber>)>>,
    ) -> CheckpointBuilderResult<
        Vec<
            Vec<(
                TransactionEffects,
                Vec<(GenericSignature, Option<SequenceNumber>)>,
            )>,
        >,
    > {
        let _guard = monitored_scope("CheckpointBuilder::split_checkpoint_chunks");
        let mut chunks = Vec::new();
        let mut chunk = Vec::new();
        let mut chunk_size: usize = 0;
        for ((effects, transaction_size), signatures) in effects_and_transaction_sizes
            .into_iter()
            .zip(signatures.into_iter())
        {
            let signatures_size = if self.epoch_store.protocol_config().address_aliases() {
                bcs::serialized_size(&signatures)?
            } else {
                let signatures: Vec<&GenericSignature> =
                    signatures.iter().map(|(s, _)| s).collect();
                bcs::serialized_size(&signatures)?
            };
            let size = transaction_size + bcs::serialized_size(&effects)? + signatures_size;
            if chunk.len() == self.max_transactions_per_checkpoint
                || (chunk_size + size) > self.max_checkpoint_size_bytes
            {
                if chunk.is_empty() {
                    warn!(
                        "Size of single transaction ({size}) exceeds max checkpoint size ({}); allowing excessively large checkpoint to go through.",
                        self.max_checkpoint_size_bytes
                    );
                } else {
                    chunks.push(chunk);
                    chunk = Vec::new();
                    chunk_size = 0;
                }
            }

            chunk.push((effects, signatures));
            chunk_size += size;
        }

        if !chunk.is_empty() || chunks.is_empty() {
            chunks.push(chunk);
        }
        Ok(chunks)
    }

    fn load_last_built_checkpoint_summary(
        epoch_store: &AuthorityPerEpochStore,
        store: &CheckpointStore,
    ) -> SuiResult<Option<(CheckpointSequenceNumber, CheckpointSummary)>> {
        let mut last_checkpoint = epoch_store.last_built_checkpoint_summary()?;
        if last_checkpoint.is_none() {
            let epoch = epoch_store.epoch();
            if epoch > 0 {
                let previous_epoch = epoch - 1;
                let last_verified = store.get_epoch_last_checkpoint(previous_epoch)?;
                last_checkpoint = last_verified.map(VerifiedCheckpoint::into_summary_and_sequence);
                if let Some((ref seq, _)) = last_checkpoint {
                    debug!(
                        "No checkpoints in builder DB, taking checkpoint from previous epoch with sequence {seq}"
                    );
                } else {
                    panic!("Can not find last checkpoint for previous epoch {previous_epoch}");
                }
            }
        }
        Ok(last_checkpoint)
    }

1964 #[instrument(level = "debug", skip_all)]
1965 async fn create_checkpoints(
1966 &self,
1967 all_effects: Vec<TransactionEffects>,
1968 details: &PendingCheckpointInfo,
1969 all_roots: &HashSet<TransactionDigest>,
1970 ) -> CheckpointBuilderResult<NonEmpty<(CheckpointSummary, CheckpointContents)>> {
1971 let _scope = monitored_scope("CheckpointBuilder::create_checkpoints");
1972
1973 let total = all_effects.len();
1974 let mut last_checkpoint =
1975 Self::load_last_built_checkpoint_summary(&self.epoch_store, &self.store)?;
1976 let last_checkpoint_seq = last_checkpoint.as_ref().map(|(seq, _)| *seq);
1977 info!(
1978 checkpoint_commit_height = details.checkpoint_height,
1979 next_checkpoint_seq = last_checkpoint_seq.unwrap_or_default() + 1,
1980 checkpoint_timestamp = details.timestamp_ms,
1981 "Creating checkpoint(s) for {} transactions",
1982 all_effects.len(),
1983 );
1984
1985 let all_digests: Vec<_> = all_effects
1986 .iter()
1987 .map(|effect| *effect.transaction_digest())
1988 .collect();
1989 let transactions_and_sizes = self
1990 .state
1991 .get_transaction_cache_reader()
1992 .get_transactions_and_serialized_sizes(&all_digests)?;
1993 let mut all_effects_and_transaction_sizes = Vec::with_capacity(all_effects.len());
1994 let mut transactions = Vec::with_capacity(all_effects.len());
1995 let mut transaction_keys = Vec::with_capacity(all_effects.len());
1996 let mut randomness_rounds = BTreeMap::new();
1997 {
1998 let _guard = monitored_scope("CheckpointBuilder::wait_for_transactions_sequenced");
1999 debug!(
2000 ?last_checkpoint_seq,
2001             "Waiting for {} certificates to appear in consensus",
2002 all_effects.len()
2003 );
2004
2005 for (effects, transaction_and_size) in all_effects
2006 .into_iter()
2007 .zip(transactions_and_sizes.into_iter())
2008 {
2009 let (transaction, size) = transaction_and_size
2010                 .unwrap_or_else(|| panic!("Could not find executed transaction for effects {:?}", effects));
2011 match transaction.inner().transaction_data().kind() {
2012 TransactionKind::ConsensusCommitPrologue(_)
2013 | TransactionKind::ConsensusCommitPrologueV2(_)
2014 | TransactionKind::ConsensusCommitPrologueV3(_)
2015 | TransactionKind::ConsensusCommitPrologueV4(_)
2016 | TransactionKind::AuthenticatorStateUpdate(_) => {
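                        // System transactions generated while handling the consensus commit;
                        // there is no external consensus message to wait for here.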
2017 }
2020 TransactionKind::ProgrammableSystemTransaction(_) => {
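                        // System-generated programmable transactions; likewise nothing to wait for.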
2021 }
2023 TransactionKind::ChangeEpoch(_)
2024 | TransactionKind::Genesis(_)
2025 | TransactionKind::EndOfEpochTransaction(_) => {
2026 fatal!(
2027 "unexpected transaction in checkpoint effects: {:?}",
2028 transaction
2029 );
2030 }
2031 TransactionKind::RandomnessStateUpdate(rsu) => {
2032 randomness_rounds
2033 .insert(*effects.transaction_digest(), rsu.randomness_round);
2034 }
2035 TransactionKind::ProgrammableTransaction(_) => {
2036 let digest = *effects.transaction_digest();
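                        // Roots were sequenced by this consensus commit; only transactions pulled
                        // in as dependencies still need their consensus messages to be processed
                        // so that user signatures become available.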
2040 if !all_roots.contains(&digest) {
2041 transaction_keys.push(SequencedConsensusTransactionKey::External(
2042 ConsensusTransactionKey::Certificate(digest),
2043 ));
2044 }
2045 }
2046 }
2047 transactions.push(transaction);
2048 all_effects_and_transaction_sizes.push((effects, size));
2049 }
2050
2051 self.epoch_store
2052 .consensus_messages_processed_notify(transaction_keys)
2053 .await?;
2054 }
2055
2056 let signatures = self
2057 .epoch_store
2058 .user_signatures_for_checkpoint(&transactions, &all_digests);
2059 debug!(
2060 ?last_checkpoint_seq,
2061 "Received {} checkpoint user signatures from consensus",
2062 signatures.len()
2063 );
2064
2065 let mut end_of_epoch_observation_keys: Option<Vec<_>> = if details.last_of_epoch {
2066 Some(
2067 transactions
2068 .iter()
2069 .flat_map(|tx| {
2070 if let TransactionKind::ProgrammableTransaction(ptb) =
2071 tx.transaction_data().kind()
2072 {
2073 itertools::Either::Left(
2074 ptb.commands
2075 .iter()
2076 .map(ExecutionTimeObservationKey::from_command),
2077 )
2078 } else {
2079 itertools::Either::Right(std::iter::empty())
2080 }
2081 })
2082 .collect(),
2083 )
2084 } else {
2085 None
2086 };
2087
2088 let chunks = self.split_checkpoint_chunks(all_effects_and_transaction_sizes, signatures)?;
2089 let chunks_count = chunks.len();
2090
2091 let mut checkpoints = Vec::with_capacity(chunks_count);
2092 debug!(
2093 ?last_checkpoint_seq,
2094 "Creating {} checkpoints with {} transactions", chunks_count, total,
2095 );
2096
2097 let epoch = self.epoch_store.epoch();
2098 for (index, transactions) in chunks.into_iter().enumerate() {
2099 let first_checkpoint_of_epoch = index == 0
2100 && last_checkpoint
2101 .as_ref()
2102 .map(|(_, c)| c.epoch != epoch)
2103 .unwrap_or(true);
2104 if first_checkpoint_of_epoch {
2105 self.epoch_store
2106 .record_epoch_first_checkpoint_creation_time_metric();
2107 }
2108 let last_checkpoint_of_epoch = details.last_of_epoch && index == chunks_count - 1;
2109
2110 let sequence_number = last_checkpoint
2111 .as_ref()
2112 .map(|(_, c)| c.sequence_number + 1)
2113 .unwrap_or_default();
2114 let mut timestamp_ms = details.timestamp_ms;
2115 if let Some((_, last_checkpoint)) = &last_checkpoint
2116 && last_checkpoint.timestamp_ms > timestamp_ms
2117 {
2118 debug!(
2120                     "Checkpoint timestamp decreased relative to the previous checkpoint, possibly due to epoch change. Sequence: {}, previous: {}, current: {}",
2121 sequence_number, last_checkpoint.timestamp_ms, timestamp_ms,
2122 );
2123 if self
2124 .epoch_store
2125 .protocol_config()
2126 .enforce_checkpoint_timestamp_monotonicity()
2127 {
2128 timestamp_ms = last_checkpoint.timestamp_ms;
2129 }
2130 }
2131
2132 let (mut effects, mut signatures): (Vec<_>, Vec<_>) = transactions.into_iter().unzip();
2133 let epoch_rolling_gas_cost_summary =
2134 self.get_epoch_total_gas_cost(last_checkpoint.as_ref().map(|(_, c)| c), &effects);
2135
2136 let end_of_epoch_data = if last_checkpoint_of_epoch {
2137 let system_state_obj = self
2138 .augment_epoch_last_checkpoint(
2139 &epoch_rolling_gas_cost_summary,
2140 timestamp_ms,
2141 &mut effects,
2142 &mut signatures,
2143 sequence_number,
2144 std::mem::take(&mut end_of_epoch_observation_keys).expect("end_of_epoch_observation_keys must be populated for the last checkpoint"),
2145 last_checkpoint_seq.unwrap_or_default(),
2146 )
2147 .await?;
2148
2149 let committee = system_state_obj
2150 .get_current_epoch_committee()
2151 .committee()
2152 .clone();
2153
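                // For the final checkpoint of the epoch, fold this checkpoint's effects into the
                // running root accumulator and compute the epoch's root state digest.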
2154 let root_state_digest = {
2157 let state_acc = self
2158 .global_state_hasher
2159 .upgrade()
2160                     .expect("No checkpoints should be getting built after local reconfiguration");
2161 let acc = state_acc.accumulate_checkpoint(
2162 &effects,
2163 sequence_number,
2164 &self.epoch_store,
2165 )?;
2166
2167 state_acc
2168 .wait_for_previous_running_root(&self.epoch_store, sequence_number)
2169 .await?;
2170
2171 state_acc.accumulate_running_root(
2172 &self.epoch_store,
2173 sequence_number,
2174 Some(acc),
2175 )?;
2176 state_acc
2177 .digest_epoch(self.epoch_store.clone(), sequence_number)
2178 .await?
2179 };
2180 self.metrics.highest_accumulated_epoch.set(epoch as i64);
2181 info!("Epoch {epoch} root state hash digest: {root_state_digest:?}");
2182
2183 let epoch_commitments = if self
2184 .epoch_store
2185 .protocol_config()
2186 .check_commit_root_state_digest_supported()
2187 {
2188 vec![root_state_digest.into()]
2189 } else {
2190 vec![]
2191 };
2192
2193 Some(EndOfEpochData {
2194 next_epoch_committee: committee.voting_rights,
2195 next_epoch_protocol_version: ProtocolVersion::new(
2196 system_state_obj.protocol_version(),
2197 ),
2198 epoch_commitments,
2199 })
2200 } else {
2201 self.send_to_hasher
2202 .send((sequence_number, effects.clone()))
2203 .await?;
2204
2205 None
2206 };
2207 let contents = if self.epoch_store.protocol_config().address_aliases() {
2208 CheckpointContents::new_v2(&effects, signatures)
2209 } else {
2210 CheckpointContents::new_with_digests_and_signatures(
2211 effects.iter().map(TransactionEffects::execution_digests),
2212 signatures
2213 .into_iter()
2214 .map(|sigs| sigs.into_iter().map(|(s, _)| s).collect())
2215 .collect(),
2216 )
2217 };
2218
2219 let num_txns = contents.size() as u64;
2220
2221 let network_total_transactions = last_checkpoint
2222 .as_ref()
2223 .map(|(_, c)| c.network_total_transactions + num_txns)
2224 .unwrap_or(num_txns);
2225
2226 let previous_digest = last_checkpoint.as_ref().map(|(_, c)| c.digest());
2227
2228 let matching_randomness_rounds: Vec<_> = effects
2229 .iter()
2230 .filter_map(|e| randomness_rounds.get(e.transaction_digest()))
2231 .copied()
2232 .collect();
2233
2234 let checkpoint_commitments = if self
2235 .epoch_store
2236 .protocol_config()
2237 .include_checkpoint_artifacts_digest_in_summary()
2238 {
2239 let artifacts = CheckpointArtifacts::from(&effects[..]);
2240 let artifacts_digest = artifacts.digest()?;
2241 vec![artifacts_digest.into()]
2242 } else {
2243 Default::default()
2244 };
2245
2246 let summary = CheckpointSummary::new(
2247 self.epoch_store.protocol_config(),
2248 epoch,
2249 sequence_number,
2250 network_total_transactions,
2251 &contents,
2252 previous_digest,
2253 epoch_rolling_gas_cost_summary,
2254 end_of_epoch_data,
2255 timestamp_ms,
2256 matching_randomness_rounds,
2257 checkpoint_commitments,
2258 );
2259 summary.report_checkpoint_age(
2260 &self.metrics.last_created_checkpoint_age,
2261 &self.metrics.last_created_checkpoint_age_ms,
2262 );
2263 if last_checkpoint_of_epoch {
2264 info!(
2265 checkpoint_seq = sequence_number,
2266 "creating last checkpoint of epoch {}", epoch
2267 );
2268 if let Some(stats) = self.store.get_epoch_stats(epoch, &summary) {
2269 self.epoch_store
2270 .report_epoch_metrics_at_last_checkpoint(stats);
2271 }
2272 }
2273 last_checkpoint = Some((sequence_number, summary.clone()));
2274 checkpoints.push((summary, contents));
2275 }
2276
2277 Ok(NonEmpty::from_vec(checkpoints).expect("at least one checkpoint"))
2278 }
2279
2280 fn get_epoch_total_gas_cost(
2281 &self,
2282 last_checkpoint: Option<&CheckpointSummary>,
2283 cur_checkpoint_effects: &[TransactionEffects],
2284 ) -> GasCostSummary {
2285 let (previous_epoch, previous_gas_costs) = last_checkpoint
2286 .map(|c| (c.epoch, c.epoch_rolling_gas_cost_summary.clone()))
2287 .unwrap_or_default();
2288 let current_gas_costs = GasCostSummary::new_from_txn_effects(cur_checkpoint_effects.iter());
2289 if previous_epoch == self.epoch_store.epoch() {
2290 GasCostSummary::new(
2292 previous_gas_costs.computation_cost + current_gas_costs.computation_cost,
2293 previous_gas_costs.storage_cost + current_gas_costs.storage_cost,
2294 previous_gas_costs.storage_rebate + current_gas_costs.storage_rebate,
2295 previous_gas_costs.non_refundable_storage_fee
2296 + current_gas_costs.non_refundable_storage_fee,
2297 )
2298 } else {
2299 current_gas_costs
2300 }
2301 }
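    // Example for get_epoch_total_gas_cost above (hypothetical values): if the previous
    // checkpoint of this epoch rolled up computation_cost = 100 and the current chunk adds 25,
    // the new rolling summary carries 125; at an epoch boundary (previous epoch != current
    // epoch) the rollup restarts from the current chunk's costs alone.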
2302
2303 #[instrument(level = "error", skip_all)]
2304 async fn augment_epoch_last_checkpoint(
2305 &self,
2306 epoch_total_gas_cost: &GasCostSummary,
2307 epoch_start_timestamp_ms: CheckpointTimestamp,
2308 checkpoint_effects: &mut Vec<TransactionEffects>,
2309 signatures: &mut Vec<Vec<(GenericSignature, Option<SequenceNumber>)>>,
2310 checkpoint: CheckpointSequenceNumber,
2311 end_of_epoch_observation_keys: Vec<ExecutionTimeObservationKey>,
2312 last_checkpoint: CheckpointSequenceNumber,
2315 ) -> CheckpointBuilderResult<SuiSystemState> {
2316 let (system_state, effects) = self
2317 .state
2318 .create_and_execute_advance_epoch_tx(
2319 &self.epoch_store,
2320 epoch_total_gas_cost,
2321 checkpoint,
2322 epoch_start_timestamp_ms,
2323 end_of_epoch_observation_keys,
2324 last_checkpoint,
2325 )
2326 .await?;
2327 checkpoint_effects.push(effects);
2328 signatures.push(vec![]);
2329 Ok(system_state)
2330 }
2331
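    /// Expands the given root effects with all of their same-epoch dependencies that are not
    /// yet part of any checkpoint, returning the full set of effects to include.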
2332 #[instrument(level = "debug", skip_all)]
2339 fn complete_checkpoint_effects(
2340 &self,
2341 mut roots: Vec<TransactionEffects>,
2342 existing_tx_digests_in_checkpoint: &mut BTreeSet<TransactionDigest>,
2343 ) -> SuiResult<Vec<TransactionEffects>> {
2344 let _scope = monitored_scope("CheckpointBuilder::complete_checkpoint_effects");
2345 let mut results = vec![];
2346 let mut seen = HashSet::new();
2347 loop {
2348 let mut pending = HashSet::new();
2349
2350 let transactions_included = self
2351 .epoch_store
2352 .builder_included_transactions_in_checkpoint(
2353 roots.iter().map(|e| e.transaction_digest()),
2354 )?;
2355
2356 for (effect, tx_included) in roots.into_iter().zip(transactions_included.into_iter()) {
2357 let digest = effect.transaction_digest();
2358 seen.insert(*digest);
2360
2361 if existing_tx_digests_in_checkpoint.contains(effect.transaction_digest()) {
2363 continue;
2364 }
2365
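                // Skip anything the builder has already recorded in a checkpoint, as well as
                // transactions executed in a previous epoch.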
2366 if tx_included || effect.executed_epoch() < self.epoch_store.epoch() {
2368 continue;
2369 }
2370
2371 let existing_effects = self
2372 .epoch_store
2373 .transactions_executed_in_cur_epoch(effect.dependencies())?;
2374
2375 for (dependency, effects_signature_exists) in
2376 effect.dependencies().iter().zip(existing_effects.iter())
2377 {
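                    // Dependencies that were not executed in the current epoch are already
                    // covered by earlier checkpoints; only same-epoch dependencies are pulled in.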
2378 if !effects_signature_exists {
2383 continue;
2384 }
2385 if seen.insert(*dependency) {
2386 pending.insert(*dependency);
2387 }
2388 }
2389 results.push(effect);
2390 }
2391 if pending.is_empty() {
2392 break;
2393 }
2394 let pending = pending.into_iter().collect::<Vec<_>>();
2395 let effects = self.effects_store.multi_get_executed_effects(&pending);
2396 let effects = effects
2397 .into_iter()
2398 .zip(pending)
2399 .map(|(opt, digest)| match opt {
2400 Some(x) => x,
2401 None => panic!(
2402                         "Cannot find effects for transaction {:?}, even though a transaction that depends on it was already executed",
2403 digest
2404 ),
2405 })
2406 .collect::<Vec<_>>();
2407 roots = effects;
2408 }
2409
2410 existing_tx_digests_in_checkpoint.extend(results.iter().map(|e| e.transaction_digest()));
2411 Ok(results)
2412 }
2413
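    /// Debug-only (msim) invariant check: at most one consensus commit prologue may appear
    /// among the roots, and if present it must be the first transaction after causal ordering.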
2414 #[cfg(msim)]
2417 fn expensive_consensus_commit_prologue_invariants_check(
2418 &self,
2419 root_digests: &[TransactionDigest],
2420 sorted: &[TransactionEffects],
2421 ) {
2422 let root_txs = self
2424 .state
2425 .get_transaction_cache_reader()
2426 .multi_get_transaction_blocks(root_digests);
2427 let ccps = root_txs
2428 .iter()
2429 .filter_map(|tx| {
2430 if let Some(tx) = tx {
2431 if tx.transaction_data().is_consensus_commit_prologue() {
2432 Some(tx)
2433 } else {
2434 None
2435 }
2436 } else {
2437 None
2438 }
2439 })
2440 .collect::<Vec<_>>();
2441
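        // A single consensus commit can contain at most one consensus commit prologue.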
2442 assert!(ccps.len() <= 1);
2444
2445 let txs = self
2447 .state
2448 .get_transaction_cache_reader()
2449 .multi_get_transaction_blocks(
2450 &sorted
2451 .iter()
2452                     .map(|tx| *tx.transaction_digest())
2453 .collect::<Vec<_>>(),
2454 );
2455
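        // If a prologue is present it must be the first transaction in the causally sorted
        // list; otherwise no transaction in this commit may be a prologue.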
2456         if ccps.is_empty() {
2457 for tx in txs.iter() {
2460 if let Some(tx) = tx {
2461 assert!(!tx.transaction_data().is_consensus_commit_prologue());
2462 }
2463 }
2464 } else {
2465 assert!(
2467 txs[0]
2468 .as_ref()
2469 .unwrap()
2470 .transaction_data()
2471 .is_consensus_commit_prologue()
2472 );
2473
2474 assert_eq!(ccps[0].digest(), txs[0].as_ref().unwrap().digest());
2475
2476 for tx in txs.iter().skip(1) {
2477 if let Some(tx) = tx {
2478 assert!(!tx.transaction_data().is_consensus_commit_prologue());
2479 }
2480 }
2481 }
2482 }
2483}
2484
2485async fn wait_for_effects_with_retry(
2486 effects_store: &dyn TransactionCacheRead,
2487 task_name: &'static str,
2488 digests: &[TransactionDigest],
2489 tx_key: TransactionKey,
2490) -> Vec<TransactionEffects> {
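    // Allow more time per attempt when running under the Antithesis simulator, where execution
    // can be substantially slower.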
2491 let delay = if in_antithesis() {
2492 15
2494 } else {
2495 5
2496 };
2497 loop {
2498 match tokio::time::timeout(Duration::from_secs(delay), async {
2499 effects_store
2500 .notify_read_executed_effects(task_name, digests)
2501 .await
2502 })
2503 .await
2504 {
2505 Ok(effects) => break effects,
2506 Err(_) => {
2507 debug_fatal!(
2508 "Timeout waiting for transactions to be executed {:?}, retrying...",
2509 tx_key
2510 );
2511 }
2512 }
2513 }
2514}
2515
2516impl CheckpointAggregator {
2517 fn new(
2518 tables: Arc<CheckpointStore>,
2519 epoch_store: Arc<AuthorityPerEpochStore>,
2520 notify: Arc<Notify>,
2521 output: Box<dyn CertifiedCheckpointOutput>,
2522 state: Arc<AuthorityState>,
2523 metrics: Arc<CheckpointMetrics>,
2524 ) -> Self {
2525 let current = None;
2526 Self {
2527 store: tables,
2528 epoch_store,
2529 notify,
2530 current,
2531 output,
2532 state,
2533 metrics,
2534 }
2535 }
2536
2537 async fn run(mut self) {
2538 info!("Starting CheckpointAggregator");
2539 loop {
2540 if let Err(e) = self.run_and_notify().await {
2541 error!(
2542 "Error while aggregating checkpoint, will retry in 1s: {:?}",
2543 e
2544 );
2545 self.metrics.checkpoint_errors.inc();
2546 tokio::time::sleep(Duration::from_secs(1)).await;
2547 continue;
2548 }
2549
2550 let _ = timeout(Duration::from_secs(1), self.notify.notified()).await;
2551 }
2552 }
2553
2554 async fn run_and_notify(&mut self) -> SuiResult {
2555 let summaries = self.run_inner()?;
2556 for summary in summaries {
2557 self.output.certified_checkpoint_created(&summary).await?;
2558 }
2559 Ok(())
2560 }
2561
2562 fn run_inner(&mut self) -> SuiResult<Vec<CertifiedCheckpointSummary>> {
2563 let _scope = monitored_scope("CheckpointAggregator");
2564 let mut result = vec![];
2565 'outer: loop {
2566 let next_to_certify = self.next_checkpoint_to_certify()?;
2567 let current = if let Some(current) = &mut self.current {
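                // The checkpoint being aggregated has already been certified elsewhere (for
                // example via state sync); discard the stale aggregation state and move on.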
2568 if current.summary.sequence_number < next_to_certify {
2574 assert_reachable!("skip checkpoint certification");
2575 self.current = None;
2576 continue;
2577 }
2578 current
2579 } else {
2580 let Some(summary) = self
2581 .epoch_store
2582 .get_built_checkpoint_summary(next_to_certify)?
2583 else {
2584 return Ok(result);
2585 };
2586 self.current = Some(CheckpointSignatureAggregator {
2587 next_index: 0,
2588 digest: summary.digest(),
2589 summary,
2590 signatures_by_digest: MultiStakeAggregator::new(
2591 self.epoch_store.committee().clone(),
2592 ),
2593 store: self.store.clone(),
2594 state: self.state.clone(),
2595 metrics: self.metrics.clone(),
2596 });
2597 self.current.as_mut().unwrap()
2598 };
2599
2600 let epoch_tables = self
2601 .epoch_store
2602 .tables()
2603 .expect("should not run past end of epoch");
2604 let iter = epoch_tables
2605 .pending_checkpoint_signatures
2606 .safe_iter_with_bounds(
2607 Some((current.summary.sequence_number, current.next_index)),
2608 None,
2609 );
2610 for item in iter {
2611 let ((seq, index), data) = item?;
2612 if seq != current.summary.sequence_number {
2613 trace!(
2614 checkpoint_seq =? current.summary.sequence_number,
2615 "Not enough checkpoint signatures",
2616 );
2617 return Ok(result);
2619 }
2620 trace!(
2621 checkpoint_seq = current.summary.sequence_number,
2622 "Processing signature for checkpoint (digest: {:?}) from {:?}",
2623 current.summary.digest(),
2624 data.summary.auth_sig().authority.concise()
2625 );
2626 self.metrics
2627 .checkpoint_participation
2628 .with_label_values(&[&format!(
2629 "{:?}",
2630 data.summary.auth_sig().authority.concise()
2631 )])
2632 .inc();
2633 if let Ok(auth_signature) = current.try_aggregate(data) {
2634 debug!(
2635 checkpoint_seq = current.summary.sequence_number,
2636 "Successfully aggregated signatures for checkpoint (digest: {:?})",
2637 current.summary.digest(),
2638 );
2639 let summary = VerifiedCheckpoint::new_unchecked(
2640 CertifiedCheckpointSummary::new_from_data_and_sig(
2641 current.summary.clone(),
2642 auth_signature,
2643 ),
2644 );
2645
2646 self.store.insert_certified_checkpoint(&summary)?;
2647 self.metrics
2648 .last_certified_checkpoint
2649 .set(current.summary.sequence_number as i64);
2650 current.summary.report_checkpoint_age(
2651 &self.metrics.last_certified_checkpoint_age,
2652 &self.metrics.last_certified_checkpoint_age_ms,
2653 );
2654 result.push(summary.into_inner());
2655 self.current = None;
2656 continue 'outer;
2657 } else {
2658 current.next_index = index + 1;
2659 }
2660 }
2661 break;
2662 }
2663 Ok(result)
2664 }
2665
2666 fn next_checkpoint_to_certify(&self) -> SuiResult<CheckpointSequenceNumber> {
2667 Ok(self
2668 .store
2669 .tables
2670 .certified_checkpoints
2671 .reversed_safe_iter_with_bounds(None, None)?
2672 .next()
2673 .transpose()?
2674 .map(|(seq, _)| seq + 1)
2675 .unwrap_or_default())
2676 }
2677}
2678
2679impl CheckpointSignatureAggregator {
2680 #[allow(clippy::result_unit_err)]
2681 pub fn try_aggregate(
2682 &mut self,
2683 data: CheckpointSignatureMessage,
2684 ) -> Result<AuthorityStrongQuorumSignInfo, ()> {
2685 let their_digest = *data.summary.digest();
2686 let (_, signature) = data.summary.into_data_and_sig();
2687 let author = signature.authority;
2688 let envelope =
2689 SignedCheckpointSummary::new_from_data_and_sig(self.summary.clone(), signature);
2690 match self.signatures_by_digest.insert(their_digest, envelope) {
2691 InsertResult::Failed { error }
2693 if matches!(
2694 error.as_inner(),
2695 SuiErrorKind::StakeAggregatorRepeatedSigner {
2696 conflicting_sig: false,
2697 ..
2698 },
2699 ) =>
2700 {
2701 Err(())
2702 }
2703 InsertResult::Failed { error } => {
2704 warn!(
2705 checkpoint_seq = self.summary.sequence_number,
2706 "Failed to aggregate new signature from validator {:?}: {:?}",
2707 author.concise(),
2708 error
2709 );
2710 self.check_for_split_brain();
2711 Err(())
2712 }
2713 InsertResult::QuorumReached(cert) => {
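                // A quorum formed, but on a digest different from the one we built locally:
                // the network certified a checkpoint that forks from ours.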
2714 if their_digest != self.digest {
2717 self.metrics.remote_checkpoint_forks.inc();
2718 warn!(
2719 checkpoint_seq = self.summary.sequence_number,
2720 "Validator {:?} has mismatching checkpoint digest {}, we have digest {}",
2721 author.concise(),
2722 their_digest,
2723 self.digest
2724 );
2725 return Err(());
2726 }
2727 Ok(cert)
2728 }
2729 InsertResult::NotEnoughVotes {
2730 bad_votes: _,
2731 bad_authorities: _,
2732 } => {
2733 self.check_for_split_brain();
2734 Err(())
2735 }
2736 }
2737 }
2738
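    /// Checks whether so much stake has signed conflicting digests that a quorum on the locally
    /// built digest is no longer reachable; if so, logs the competing digests and kicks off
    /// asynchronous fork diagnostics.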
2739 fn check_for_split_brain(&self) {
2743 debug!(
2744 checkpoint_seq = self.summary.sequence_number,
2745 "Checking for split brain condition"
2746 );
2747 if self.signatures_by_digest.quorum_unreachable() {
2748 let all_unique_values = self.signatures_by_digest.get_all_unique_values();
2754 let digests_by_stake_messages = all_unique_values
2755 .iter()
2756 .sorted_by_key(|(_, (_, stake))| -(*stake as i64))
2757 .map(|(digest, (_authorities, total_stake))| {
2758 format!("{:?} (total stake: {})", digest, total_stake)
2759 })
2760 .collect::<Vec<String>>();
2761 fail_point_arg!("kill_split_brain_node", |(
2762 checkpoint_overrides,
2763 forked_authorities,
2764 ): (
2765 std::sync::Arc<std::sync::Mutex<std::collections::BTreeMap<u64, String>>>,
2766 std::sync::Arc<std::sync::Mutex<std::collections::HashSet<AuthorityName>>>,
2767 )| {
2768 #[cfg(msim)]
2769 {
2770 if let (Ok(mut overrides), Ok(forked_authorities_set)) =
2771 (checkpoint_overrides.lock(), forked_authorities.lock())
2772 {
2773 let correct_digest = all_unique_values
2775 .iter()
2776 .find(|(_, (authorities, _))| {
2777 authorities
2779 .iter()
2780 .any(|auth| !forked_authorities_set.contains(auth))
2781 })
2782 .map(|(digest, _)| digest.to_string())
2783 .unwrap_or_else(|| {
2784 all_unique_values
2786 .iter()
2787 .max_by_key(|(_, (_, stake))| *stake)
2788 .map(|(digest, _)| digest.to_string())
2789 .unwrap_or_else(|| self.digest.to_string())
2790 });
2791
2792 overrides.insert(self.summary.sequence_number, correct_digest.clone());
2793
2794 tracing::error!(
2795 fatal = true,
2796 "Fork recovery test: detected split-brain for sequence number: {}, using digest: {}",
2797 self.summary.sequence_number,
2798 correct_digest
2799 );
2800 }
2801 }
2802 });
2803
2804 debug_fatal!(
2805 "Split brain detected in checkpoint signature aggregation for checkpoint {:?}. Remaining stake: {:?}, Digests by stake: {:?}",
2806 self.summary.sequence_number,
2807 self.signatures_by_digest.uncommitted_stake(),
2808 digests_by_stake_messages
2809 );
2810 self.metrics.split_brain_checkpoint_forks.inc();
2811
2812 let all_unique_values = self.signatures_by_digest.get_all_unique_values();
2813 let local_summary = self.summary.clone();
2814 let state = self.state.clone();
2815 let tables = self.store.clone();
2816
2817 tokio::spawn(async move {
2818 diagnose_split_brain(all_unique_values, local_summary, state, tables).await;
2819 });
2820 }
2821 }
2822}
2823
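/// Best-effort fork diagnostics: for each conflicting digest, fetches the pending checkpoint
/// from one of the disagreeing validators and writes summary/contents/transaction/effects diffs
/// against the local checkpoint to a file in a temporary directory.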
2824async fn diagnose_split_brain(
2830 all_unique_values: BTreeMap<CheckpointDigest, (Vec<AuthorityName>, StakeUnit)>,
2831 local_summary: CheckpointSummary,
2832 state: Arc<AuthorityState>,
2833 tables: Arc<CheckpointStore>,
2834) {
2835 debug!(
2836 checkpoint_seq = local_summary.sequence_number,
2837 "Running split brain diagnostics..."
2838 );
2839 let time = SystemTime::now();
2840 let digest_to_validator = all_unique_values
2842 .iter()
2843 .filter_map(|(digest, (validators, _))| {
2844 if *digest != local_summary.digest() {
2845 let random_validator = validators.choose(&mut get_rng()).unwrap();
2846 Some((*digest, *random_validator))
2847 } else {
2848 None
2849 }
2850 })
2851 .collect::<HashMap<_, _>>();
2852 if digest_to_validator.is_empty() {
2853 panic!(
2854             "Given a split brain condition, there should be at \
2855             least one validator that disagrees with the local signature"
2856 );
2857 }
2858
2859 let epoch_store = state.load_epoch_store_one_call_per_task();
2860 let committee = epoch_store
2861 .epoch_start_state()
2862 .get_sui_committee_with_network_metadata();
2863 let network_config = default_mysten_network_config();
2864 let network_clients =
2865 make_network_authority_clients_with_network_config(&committee, &network_config);
2866
2867 let response_futures = digest_to_validator
2869 .values()
2870 .cloned()
2871 .map(|validator| {
2872 let client = network_clients
2873 .get(&validator)
2874 .expect("Failed to get network client");
2875 let request = CheckpointRequestV2 {
2876 sequence_number: Some(local_summary.sequence_number),
2877 request_content: true,
2878 certified: false,
2879 };
2880 client.handle_checkpoint_v2(request)
2881 })
2882 .collect::<Vec<_>>();
2883
2884 let digest_name_pair = digest_to_validator.iter();
2885 let response_data = futures::future::join_all(response_futures)
2886 .await
2887 .into_iter()
2888 .zip(digest_name_pair)
2889 .filter_map(|(response, (digest, name))| match response {
2890 Ok(response) => match response {
2891 CheckpointResponseV2 {
2892 checkpoint: Some(CheckpointSummaryResponse::Pending(summary)),
2893 contents: Some(contents),
2894 } => Some((*name, *digest, summary, contents)),
2895 CheckpointResponseV2 {
2896 checkpoint: Some(CheckpointSummaryResponse::Certified(_)),
2897 contents: _,
2898 } => {
2899 panic!("Expected pending checkpoint, but got certified checkpoint");
2900 }
2901 CheckpointResponseV2 {
2902 checkpoint: None,
2903 contents: _,
2904 } => {
2905 error!(
2906 "Summary for checkpoint {:?} not found on validator {:?}",
2907 local_summary.sequence_number, name
2908 );
2909 None
2910 }
2911 CheckpointResponseV2 {
2912 checkpoint: _,
2913 contents: None,
2914 } => {
2915 error!(
2916 "Contents for checkpoint {:?} not found on validator {:?}",
2917 local_summary.sequence_number, name
2918 );
2919 None
2920 }
2921 },
2922 Err(e) => {
2923 error!(
2924 "Failed to get checkpoint contents from validator for fork diagnostics: {:?}",
2925 e
2926 );
2927 None
2928 }
2929 })
2930 .collect::<Vec<_>>();
2931
2932 let local_checkpoint_contents = tables
2933 .get_checkpoint_contents(&local_summary.content_digest)
2934 .unwrap_or_else(|_| {
2935 panic!(
2936                 "Failed to read checkpoint contents for checkpoint with digest {:?}",
2937 local_summary.digest()
2938 )
2939 })
2940 .unwrap_or_else(|| {
2941 panic!(
2942 "Could not find local full checkpoint contents for checkpoint {:?}, digest {:?}",
2943 local_summary.sequence_number,
2944 local_summary.digest()
2945 )
2946 });
2947 let local_contents_text = format!("{local_checkpoint_contents:?}");
2948
2949 let local_summary_text = format!("{local_summary:?}");
2950 let local_validator = state.name.concise();
2951 let diff_patches = response_data
2952 .iter()
2953 .map(|(name, other_digest, other_summary, contents)| {
2954 let other_contents_text = format!("{contents:?}");
2955 let other_summary_text = format!("{other_summary:?}");
2956 let (local_transactions, local_effects): (Vec<_>, Vec<_>) = local_checkpoint_contents
2957 .enumerate_transactions(&local_summary)
2958 .map(|(_, exec_digest)| (exec_digest.transaction, exec_digest.effects))
2959 .unzip();
2960 let (other_transactions, other_effects): (Vec<_>, Vec<_>) = contents
2961 .enumerate_transactions(other_summary)
2962 .map(|(_, exec_digest)| (exec_digest.transaction, exec_digest.effects))
2963 .unzip();
2964 let summary_patch = create_patch(&local_summary_text, &other_summary_text);
2965 let contents_patch = create_patch(&local_contents_text, &other_contents_text);
2966 let local_transactions_text = format!("{local_transactions:#?}");
2967 let other_transactions_text = format!("{other_transactions:#?}");
2968 let transactions_patch =
2969 create_patch(&local_transactions_text, &other_transactions_text);
2970 let local_effects_text = format!("{local_effects:#?}");
2971 let other_effects_text = format!("{other_effects:#?}");
2972 let effects_patch = create_patch(&local_effects_text, &other_effects_text);
2973 let seq_number = local_summary.sequence_number;
2974 let local_digest = local_summary.digest();
2975 let other_validator = name.concise();
2976 format!(
2977 "Checkpoint: {seq_number:?}\n\
2978 Local validator (original): {local_validator:?}, digest: {local_digest:?}\n\
2979 Other validator (modified): {other_validator:?}, digest: {other_digest:?}\n\n\
2980 Summary Diff: \n{summary_patch}\n\n\
2981 Contents Diff: \n{contents_patch}\n\n\
2982 Transactions Diff: \n{transactions_patch}\n\n\
2983 Effects Diff: \n{effects_patch}",
2984 )
2985 })
2986 .collect::<Vec<_>>()
2987 .join("\n\n\n");
2988
2989 let header = format!(
2990 "Checkpoint Fork Dump - Authority {local_validator:?}: \n\
2991 Datetime: {:?}",
2992 time
2993 );
2994 let fork_logs_text = format!("{header}\n\n{diff_patches}\n\n");
2995 let path = tempfile::tempdir()
2996 .expect("Failed to create tempdir")
2997 .keep()
2998 .join(Path::new("checkpoint_fork_dump.txt"));
2999 let mut file = File::create(path).unwrap();
3000 write!(file, "{}", fork_logs_text).unwrap();
3001 debug!("{}", fork_logs_text);
3002}
3003
3004pub trait CheckpointServiceNotify {
3005 fn notify_checkpoint_signature(
3006 &self,
3007 epoch_store: &AuthorityPerEpochStore,
3008 info: &CheckpointSignatureMessage,
3009 ) -> SuiResult;
3010
3011 fn notify_checkpoint(&self) -> SuiResult;
3012}
3013
3014#[allow(clippy::large_enum_variant)]
3015enum CheckpointServiceState {
3016 Unstarted(
3017 (
3018 CheckpointBuilder,
3019 CheckpointAggregator,
3020 CheckpointStateHasher,
3021 ),
3022 ),
3023 Started,
3024}
3025
3026impl CheckpointServiceState {
3027 fn take_unstarted(
3028 &mut self,
3029 ) -> (
3030 CheckpointBuilder,
3031 CheckpointAggregator,
3032 CheckpointStateHasher,
3033 ) {
3034 let mut state = CheckpointServiceState::Started;
3035 std::mem::swap(self, &mut state);
3036
3037 match state {
3038 CheckpointServiceState::Unstarted((builder, aggregator, hasher)) => {
3039 (builder, aggregator, hasher)
3040 }
3041 CheckpointServiceState::Started => panic!("CheckpointServiceState is already started"),
3042 }
3043 }
3044}
3045
3046pub struct CheckpointService {
3047 tables: Arc<CheckpointStore>,
3048 notify_builder: Arc<Notify>,
3049 notify_aggregator: Arc<Notify>,
3050 last_signature_index: Mutex<u64>,
3051 highest_currently_built_seq_tx: watch::Sender<CheckpointSequenceNumber>,
3053 highest_previously_built_seq: CheckpointSequenceNumber,
3056 metrics: Arc<CheckpointMetrics>,
3057 state: Mutex<CheckpointServiceState>,
3058}
3059
3060impl CheckpointService {
3061 pub fn build(
3063 state: Arc<AuthorityState>,
3064 checkpoint_store: Arc<CheckpointStore>,
3065 epoch_store: Arc<AuthorityPerEpochStore>,
3066 effects_store: Arc<dyn TransactionCacheRead>,
3067 global_state_hasher: Weak<GlobalStateHasher>,
3068 checkpoint_output: Box<dyn CheckpointOutput>,
3069 certified_checkpoint_output: Box<dyn CertifiedCheckpointOutput>,
3070 metrics: Arc<CheckpointMetrics>,
3071 max_transactions_per_checkpoint: usize,
3072 max_checkpoint_size_bytes: usize,
3073 ) -> Arc<Self> {
3074 info!(
3075 "Starting checkpoint service with {max_transactions_per_checkpoint} max_transactions_per_checkpoint and {max_checkpoint_size_bytes} max_checkpoint_size_bytes"
3076 );
3077 let notify_builder = Arc::new(Notify::new());
3078 let notify_aggregator = Arc::new(Notify::new());
3079
3080 let highest_previously_built_seq = checkpoint_store
3082 .get_latest_locally_computed_checkpoint()
3083 .expect("failed to get latest locally computed checkpoint")
3084 .map(|s| s.sequence_number)
3085 .unwrap_or(0);
3086
3087 let highest_currently_built_seq =
3088 CheckpointBuilder::load_last_built_checkpoint_summary(&epoch_store, &checkpoint_store)
3089 .expect("epoch should not have ended")
3090 .map(|(seq, _)| seq)
3091 .unwrap_or(0);
3092
3093 let (highest_currently_built_seq_tx, _) = watch::channel(highest_currently_built_seq);
3094
3095 let aggregator = CheckpointAggregator::new(
3096 checkpoint_store.clone(),
3097 epoch_store.clone(),
3098 notify_aggregator.clone(),
3099 certified_checkpoint_output,
3100 state.clone(),
3101 metrics.clone(),
3102 );
3103
3104 let (send_to_hasher, receive_from_builder) = mpsc::channel(16);
3105
3106 let ckpt_state_hasher = CheckpointStateHasher::new(
3107 epoch_store.clone(),
3108 global_state_hasher.clone(),
3109 receive_from_builder,
3110 );
3111
3112 let builder = CheckpointBuilder::new(
3113 state.clone(),
3114 checkpoint_store.clone(),
3115 epoch_store.clone(),
3116 notify_builder.clone(),
3117 effects_store,
3118 global_state_hasher,
3119 send_to_hasher,
3120 checkpoint_output,
3121 notify_aggregator.clone(),
3122 highest_currently_built_seq_tx.clone(),
3123 metrics.clone(),
3124 max_transactions_per_checkpoint,
3125 max_checkpoint_size_bytes,
3126 );
3127
3128 let last_signature_index = epoch_store
3129 .get_last_checkpoint_signature_index()
3130 .expect("should not cross end of epoch");
3131 let last_signature_index = Mutex::new(last_signature_index);
3132
3133 Arc::new(Self {
3134 tables: checkpoint_store,
3135 notify_builder,
3136 notify_aggregator,
3137 last_signature_index,
3138 highest_currently_built_seq_tx,
3139 highest_previously_built_seq,
3140 metrics,
3141 state: Mutex::new(CheckpointServiceState::Unstarted((
3142 builder,
3143 aggregator,
3144 ckpt_state_hasher,
3145 ))),
3146 })
3147 }
3148
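    /// Spawns the builder, aggregator and state-hasher tasks, then waits (bounded by a timeout)
    /// for the builder to re-create any checkpoints that had been built locally before a restart.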
3149 pub async fn spawn(
3157 &self,
3158 epoch_store: Arc<AuthorityPerEpochStore>,
3159 consensus_replay_waiter: Option<ReplayWaiter>,
3160 ) {
3161 let (builder, aggregator, state_hasher) = self.state.lock().take_unstarted();
3162
3163 if let Some(last_committed_seq) = self
3166 .tables
3167 .get_highest_executed_checkpoint()
3168 .expect("Failed to get highest executed checkpoint")
3169 .map(|checkpoint| *checkpoint.sequence_number())
3170 {
3171 if let Err(e) = builder
3172 .epoch_store
3173 .clear_state_hashes_after_checkpoint(last_committed_seq)
3174 {
3175 error!(
3176 "Failed to clear state hashes after checkpoint {}: {:?}",
3177 last_committed_seq, e
3178 );
3179 } else {
3180 info!(
3181 "Cleared state hashes after checkpoint {} to ensure consistent ECMH computation",
3182 last_committed_seq
3183 );
3184 }
3185 }
3186
3187 let (builder_finished_tx, builder_finished_rx) = tokio::sync::oneshot::channel();
3188
3189 let state_hasher_task = spawn_monitored_task!(state_hasher.run());
3190 let aggregator_task = spawn_monitored_task!(aggregator.run());
3191
3192 spawn_monitored_task!(async move {
3193 epoch_store
3194 .within_alive_epoch(async move {
3195 builder.run(consensus_replay_waiter).await;
3196 builder_finished_tx.send(()).ok();
3197 })
3198 .await
3199 .ok();
3200
3201 state_hasher_task
3203 .await
3204 .expect("state hasher should exit normally");
3205
3206 aggregator_task.abort();
3209 aggregator_task.await.ok();
3210 });
3211
3212 if tokio::time::timeout(Duration::from_secs(120), async move {
3218 tokio::select! {
3219 _ = builder_finished_rx => { debug!("CheckpointBuilder finished"); }
3220 _ = self.wait_for_rebuilt_checkpoints() => (),
3221 }
3222 })
3223 .await
3224 .is_err()
3225 {
3226 debug_fatal!("Timed out waiting for checkpoints to be rebuilt");
3227 }
3228 }
3229}
3230
3231impl CheckpointService {
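    /// Resolves once the builder has caught up to the highest checkpoint sequence number that
    /// had been locally built before the last restart.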
3232 pub async fn wait_for_rebuilt_checkpoints(&self) {
3238 let highest_previously_built_seq = self.highest_previously_built_seq;
3239 let mut rx = self.highest_currently_built_seq_tx.subscribe();
3240 let mut highest_currently_built_seq = *rx.borrow_and_update();
3241 info!(
3242 "Waiting for checkpoints to be rebuilt, previously built seq: {highest_previously_built_seq}, currently built seq: {highest_currently_built_seq}"
3243 );
3244 loop {
3245 if highest_currently_built_seq >= highest_previously_built_seq {
3246 info!("Checkpoint rebuild complete");
3247 break;
3248 }
3249 rx.changed().await.unwrap();
3250 highest_currently_built_seq = *rx.borrow_and_update();
3251 }
3252 }
3253
3254 #[cfg(test)]
3255 fn write_and_notify_checkpoint_for_testing(
3256 &self,
3257 epoch_store: &AuthorityPerEpochStore,
3258 checkpoint: PendingCheckpoint,
3259 ) -> SuiResult {
3260 use crate::authority::authority_per_epoch_store::consensus_quarantine::ConsensusCommitOutput;
3261
3262 let mut output = ConsensusCommitOutput::new(0);
3263 epoch_store.write_pending_checkpoint(&mut output, &checkpoint)?;
3264 output.set_default_commit_stats_for_testing();
3265 epoch_store.push_consensus_output_for_tests(output);
3266 self.notify_checkpoint()?;
3267 Ok(())
3268 }
3269}
3270
3271impl CheckpointServiceNotify for CheckpointService {
3272 fn notify_checkpoint_signature(
3273 &self,
3274 epoch_store: &AuthorityPerEpochStore,
3275 info: &CheckpointSignatureMessage,
3276 ) -> SuiResult {
3277 let sequence = info.summary.sequence_number;
3278 let signer = info.summary.auth_sig().authority.concise();
3279
3280 if let Some(highest_verified_checkpoint) = self
3281 .tables
3282 .get_highest_verified_checkpoint()?
3283 .map(|x| *x.sequence_number())
3284 && sequence <= highest_verified_checkpoint
3285 {
3286 trace!(
3287 checkpoint_seq = sequence,
3288                 "Ignoring checkpoint signature from {} - checkpoint already certified", signer,
3289 );
3290 self.metrics
3291 .last_ignored_checkpoint_signature_received
3292 .set(sequence as i64);
3293 return Ok(());
3294 }
3295 trace!(
3296 checkpoint_seq = sequence,
3297 "Received checkpoint signature, digest {} from {}",
3298 info.summary.digest(),
3299 signer,
3300 );
3301 self.metrics
3302 .last_received_checkpoint_signatures
3303 .with_label_values(&[&signer.to_string()])
3304 .set(sequence as i64);
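        // Each signature gets a monotonically increasing index so the aggregator can resume its
        // iteration over pending signatures from where it previously stopped.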
3305 let mut index = self.last_signature_index.lock();
3308 *index += 1;
3309 epoch_store.insert_checkpoint_signature(sequence, *index, info)?;
3310 self.notify_aggregator.notify_one();
3311 Ok(())
3312 }
3313
3314 fn notify_checkpoint(&self) -> SuiResult {
3315 self.notify_builder.notify_one();
3316 Ok(())
3317 }
3318}
3319
3320pub struct CheckpointServiceNoop {}
3322impl CheckpointServiceNotify for CheckpointServiceNoop {
3323 fn notify_checkpoint_signature(
3324 &self,
3325 _: &AuthorityPerEpochStore,
3326 _: &CheckpointSignatureMessage,
3327 ) -> SuiResult {
3328 Ok(())
3329 }
3330
3331 fn notify_checkpoint(&self) -> SuiResult {
3332 Ok(())
3333 }
3334}
3335
3336impl PendingCheckpoint {
3337 pub fn height(&self) -> CheckpointHeight {
3338 self.details.checkpoint_height
3339 }
3340
3341 pub fn roots(&self) -> &Vec<TransactionKey> {
3342 &self.roots
3343 }
3344
3345 pub fn details(&self) -> &PendingCheckpointInfo {
3346 &self.details
3347 }
3348}
3349
3350pin_project! {
3351 pub struct PollCounter<Fut> {
3352 #[pin]
3353 future: Fut,
3354 count: usize,
3355 }
3356}
3357
3358impl<Fut> PollCounter<Fut> {
3359 pub fn new(future: Fut) -> Self {
3360 Self { future, count: 0 }
3361 }
3362
3363 pub fn count(&self) -> usize {
3364 self.count
3365 }
3366}
3367
3368impl<Fut: Future> Future for PollCounter<Fut> {
3369 type Output = (usize, Fut::Output);
3370
3371 fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
3372 let this = self.project();
3373 *this.count += 1;
3374 match this.future.poll(cx) {
3375 Poll::Ready(output) => Poll::Ready((*this.count, output)),
3376 Poll::Pending => Poll::Pending,
3377 }
3378 }
3379}
3380
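/// Wraps a future so that its output is returned together with the number of times it was
/// polled before completing; useful for instrumenting long-running waits.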
3381fn poll_count<Fut>(future: Fut) -> PollCounter<Fut> {
3382 PollCounter::new(future)
3383}
3384
3385#[cfg(test)]
3386mod tests {
3387 use super::*;
3388 use crate::authority::test_authority_builder::TestAuthorityBuilder;
3389 use crate::transaction_outputs::TransactionOutputs;
3390 use fastcrypto_zkp::bn254::zk_login::{JWK, JwkId};
3391 use futures::FutureExt as _;
3392 use futures::future::BoxFuture;
3393 use std::collections::HashMap;
3394 use std::ops::Deref;
3395 use sui_macros::sim_test;
3396 use sui_protocol_config::{Chain, ProtocolConfig};
3397 use sui_types::accumulator_event::AccumulatorEvent;
3398 use sui_types::authenticator_state::ActiveJwk;
3399 use sui_types::base_types::{SequenceNumber, TransactionEffectsDigest};
3400 use sui_types::crypto::Signature;
3401 use sui_types::effects::{TransactionEffects, TransactionEvents};
3402 use sui_types::messages_checkpoint::SignedCheckpointSummary;
3403 use sui_types::transaction::VerifiedTransaction;
3404 use tokio::sync::mpsc;
3405
3406 #[tokio::test]
3407 async fn test_clear_locally_computed_checkpoints_from_deletes_inclusive_range() {
3408 let store = CheckpointStore::new_for_tests();
3409 let protocol = sui_protocol_config::ProtocolConfig::get_for_max_version_UNSAFE();
3410 for seq in 70u64..=80u64 {
3411 let contents =
3412 sui_types::messages_checkpoint::CheckpointContents::new_with_digests_only_for_tests(
3413 [sui_types::base_types::ExecutionDigests::new(
3414 sui_types::digests::TransactionDigest::random(),
3415 sui_types::digests::TransactionEffectsDigest::ZERO,
3416 )],
3417 );
3418 let summary = sui_types::messages_checkpoint::CheckpointSummary::new(
3419 &protocol,
3420 0,
3421 seq,
3422 0,
3423 &contents,
3424 None,
3425 sui_types::gas::GasCostSummary::default(),
3426 None,
3427 0,
3428 Vec::new(),
3429 Vec::new(),
3430 );
3431 store
3432 .tables
3433 .locally_computed_checkpoints
3434 .insert(&seq, &summary)
3435 .unwrap();
3436 }
3437
3438 store
3439 .clear_locally_computed_checkpoints_from(76)
3440 .expect("clear should succeed");
3441
3442 assert!(
3444 store
3445 .tables
3446 .locally_computed_checkpoints
3447 .get(&75)
3448 .unwrap()
3449 .is_some()
3450 );
3451 assert!(
3452 store
3453 .tables
3454 .locally_computed_checkpoints
3455 .get(&76)
3456 .unwrap()
3457 .is_none()
3458 );
3459
3460 for seq in 70u64..76u64 {
3461 assert!(
3462 store
3463 .tables
3464 .locally_computed_checkpoints
3465 .get(&seq)
3466 .unwrap()
3467 .is_some()
3468 );
3469 }
3470 for seq in 76u64..=80u64 {
3471 assert!(
3472 store
3473 .tables
3474 .locally_computed_checkpoints
3475 .get(&seq)
3476 .unwrap()
3477 .is_none()
3478 );
3479 }
3480 }
3481
3482 #[tokio::test]
3483 async fn test_fork_detection_storage() {
3484 let store = CheckpointStore::new_for_tests();
3485 let seq_num = 42;
3487 let digest = CheckpointDigest::random();
3488
3489 assert!(store.get_checkpoint_fork_detected().unwrap().is_none());
3490
3491 store
3492 .record_checkpoint_fork_detected(seq_num, digest)
3493 .unwrap();
3494
3495 let retrieved = store.get_checkpoint_fork_detected().unwrap();
3496 assert!(retrieved.is_some());
3497 let (retrieved_seq, retrieved_digest) = retrieved.unwrap();
3498 assert_eq!(retrieved_seq, seq_num);
3499 assert_eq!(retrieved_digest, digest);
3500
3501 store.clear_checkpoint_fork_detected().unwrap();
3502 assert!(store.get_checkpoint_fork_detected().unwrap().is_none());
3503
3504 let tx_digest = TransactionDigest::random();
3506 let expected_effects = TransactionEffectsDigest::random();
3507 let actual_effects = TransactionEffectsDigest::random();
3508
3509 assert!(store.get_transaction_fork_detected().unwrap().is_none());
3510
3511 store
3512 .record_transaction_fork_detected(tx_digest, expected_effects, actual_effects)
3513 .unwrap();
3514
3515 let retrieved = store.get_transaction_fork_detected().unwrap();
3516 assert!(retrieved.is_some());
3517 let (retrieved_tx, retrieved_expected, retrieved_actual) = retrieved.unwrap();
3518 assert_eq!(retrieved_tx, tx_digest);
3519 assert_eq!(retrieved_expected, expected_effects);
3520 assert_eq!(retrieved_actual, actual_effects);
3521
3522 store.clear_transaction_fork_detected().unwrap();
3523 assert!(store.get_transaction_fork_detected().unwrap().is_none());
3524 }
3525
3526 #[sim_test]
3527 pub async fn checkpoint_builder_test() {
3528 telemetry_subscribers::init_for_testing();
3529
3530 let mut protocol_config =
3531 ProtocolConfig::get_for_version(ProtocolVersion::max(), Chain::Unknown);
3532 protocol_config.disable_accumulators_for_testing();
3533 protocol_config.set_min_checkpoint_interval_ms_for_testing(100);
3534 let state = TestAuthorityBuilder::new()
3535 .with_protocol_config(protocol_config)
3536 .build()
3537 .await;
3538
3539 let dummy_tx = VerifiedTransaction::new_authenticator_state_update(
3540 0,
3541 0,
3542 vec![],
3543 SequenceNumber::new(),
3544 );
3545
3546 let jwks = {
3547 let mut jwks = Vec::new();
3548 while bcs::to_bytes(&jwks).unwrap().len() < 40_000 {
3549 jwks.push(ActiveJwk {
3550 jwk_id: JwkId::new(
3551 "https://accounts.google.com".to_string(),
3552 "1234567890".to_string(),
3553 ),
3554 jwk: JWK {
3555 kty: "RSA".to_string(),
3556 e: "AQAB".to_string(),
3557 n: "1234567890".to_string(),
3558 alg: "RS256".to_string(),
3559 },
3560 epoch: 0,
3561 });
3562 }
3563 jwks
3564 };
3565
3566 let dummy_tx_with_data =
3567 VerifiedTransaction::new_authenticator_state_update(0, 1, jwks, SequenceNumber::new());
3568
3569 for i in 0..15 {
3570 state
3571 .database_for_testing()
3572 .perpetual_tables
3573 .transactions
3574 .insert(&d(i), dummy_tx.serializable_ref())
3575 .unwrap();
3576 }
3577 for i in 15..20 {
3578 state
3579 .database_for_testing()
3580 .perpetual_tables
3581 .transactions
3582 .insert(&d(i), dummy_tx_with_data.serializable_ref())
3583 .unwrap();
3584 }
3585
3586 let mut store = HashMap::<TransactionDigest, TransactionEffects>::new();
3587 commit_cert_for_test(
3588 &mut store,
3589 state.clone(),
3590 d(1),
3591 vec![d(2), d(3)],
3592 GasCostSummary::new(11, 12, 11, 1),
3593 );
3594 commit_cert_for_test(
3595 &mut store,
3596 state.clone(),
3597 d(2),
3598 vec![d(3), d(4)],
3599 GasCostSummary::new(21, 22, 21, 1),
3600 );
3601 commit_cert_for_test(
3602 &mut store,
3603 state.clone(),
3604 d(3),
3605 vec![],
3606 GasCostSummary::new(31, 32, 31, 1),
3607 );
3608 commit_cert_for_test(
3609 &mut store,
3610 state.clone(),
3611 d(4),
3612 vec![],
3613 GasCostSummary::new(41, 42, 41, 1),
3614 );
3615 for i in [5, 6, 7, 10, 11, 12, 13] {
3616 commit_cert_for_test(
3617 &mut store,
3618 state.clone(),
3619 d(i),
3620 vec![],
3621 GasCostSummary::new(41, 42, 41, 1),
3622 );
3623 }
3624 for i in [15, 16, 17] {
3625 commit_cert_for_test(
3626 &mut store,
3627 state.clone(),
3628 d(i),
3629 vec![],
3630 GasCostSummary::new(51, 52, 51, 1),
3631 );
3632 }
3633 let all_digests: Vec<_> = store.keys().copied().collect();
3634 for digest in all_digests {
3635 let signature = Signature::Ed25519SuiSignature(Default::default()).into();
3636 state
3637 .epoch_store_for_testing()
3638 .test_insert_user_signature(digest, vec![(signature, None)]);
3639 }
3640
3641 let (output, mut result) = mpsc::channel::<(CheckpointContents, CheckpointSummary)>(10);
3642 let (certified_output, mut certified_result) =
3643 mpsc::channel::<CertifiedCheckpointSummary>(10);
3644 let store = Arc::new(store);
3645
3646 let ckpt_dir = tempfile::tempdir().unwrap();
3647 let checkpoint_store =
3648 CheckpointStore::new(ckpt_dir.path(), Arc::new(PrunerWatermarks::default()));
3649 let epoch_store = state.epoch_store_for_testing();
3650
3651 let global_state_hasher = Arc::new(GlobalStateHasher::new_for_tests(
3652 state.get_global_state_hash_store().clone(),
3653 ));
3654
3655 let checkpoint_service = CheckpointService::build(
3656 state.clone(),
3657 checkpoint_store,
3658 epoch_store.clone(),
3659 store,
3660 Arc::downgrade(&global_state_hasher),
3661 Box::new(output),
3662 Box::new(certified_output),
3663 CheckpointMetrics::new_for_tests(),
3664 3,
3665 100_000,
3666 );
3667 checkpoint_service.spawn(epoch_store.clone(), None).await;
3668
3669 checkpoint_service
3670 .write_and_notify_checkpoint_for_testing(&epoch_store, p(0, vec![4], 0))
3671 .unwrap();
3672 checkpoint_service
3673 .write_and_notify_checkpoint_for_testing(&epoch_store, p(1, vec![1, 3], 2000))
3674 .unwrap();
3675 checkpoint_service
3676 .write_and_notify_checkpoint_for_testing(&epoch_store, p(2, vec![10, 11, 12, 13], 3000))
3677 .unwrap();
3678 checkpoint_service
3679 .write_and_notify_checkpoint_for_testing(&epoch_store, p(3, vec![15, 16, 17], 4000))
3680 .unwrap();
3681 checkpoint_service
3682 .write_and_notify_checkpoint_for_testing(&epoch_store, p(4, vec![5], 4001))
3683 .unwrap();
3684 checkpoint_service
3685 .write_and_notify_checkpoint_for_testing(&epoch_store, p(5, vec![6], 5000))
3686 .unwrap();
3687
3688 let (c1c, c1s) = result.recv().await.unwrap();
3689 let (c2c, c2s) = result.recv().await.unwrap();
3690
3691 let c1t = c1c.iter().map(|d| d.transaction).collect::<Vec<_>>();
3692 let c2t = c2c.iter().map(|d| d.transaction).collect::<Vec<_>>();
3693 assert_eq!(c1t, vec![d(4)]);
3694 assert_eq!(c1s.previous_digest, None);
3695 assert_eq!(c1s.sequence_number, 0);
3696 assert_eq!(
3697 c1s.epoch_rolling_gas_cost_summary,
3698 GasCostSummary::new(41, 42, 41, 1)
3699 );
3700
3701 assert_eq!(c2t, vec![d(3), d(2), d(1)]);
3702 assert_eq!(c2s.previous_digest, Some(c1s.digest()));
3703 assert_eq!(c2s.sequence_number, 1);
3704 assert_eq!(
3705 c2s.epoch_rolling_gas_cost_summary,
3706 GasCostSummary::new(104, 108, 104, 4)
3707 );
3708
3709 let (c3c, c3s) = result.recv().await.unwrap();
3712 let c3t = c3c.iter().map(|d| d.transaction).collect::<Vec<_>>();
3713 let (c4c, c4s) = result.recv().await.unwrap();
3714 let c4t = c4c.iter().map(|d| d.transaction).collect::<Vec<_>>();
3715 assert_eq!(c3s.sequence_number, 2);
3716 assert_eq!(c3s.previous_digest, Some(c2s.digest()));
3717 assert_eq!(c4s.sequence_number, 3);
3718 assert_eq!(c4s.previous_digest, Some(c3s.digest()));
3719 assert_eq!(c3t, vec![d(10), d(11), d(12)]);
3720 assert_eq!(c4t, vec![d(13)]);
3721
3722 let (c5c, c5s) = result.recv().await.unwrap();
3725 let c5t = c5c.iter().map(|d| d.transaction).collect::<Vec<_>>();
3726 let (c6c, c6s) = result.recv().await.unwrap();
3727 let c6t = c6c.iter().map(|d| d.transaction).collect::<Vec<_>>();
3728 assert_eq!(c5s.sequence_number, 4);
3729 assert_eq!(c5s.previous_digest, Some(c4s.digest()));
3730 assert_eq!(c6s.sequence_number, 5);
3731 assert_eq!(c6s.previous_digest, Some(c5s.digest()));
3732 assert_eq!(c5t, vec![d(15), d(16)]);
3733 assert_eq!(c6t, vec![d(17)]);
3734
3735 let (c7c, c7s) = result.recv().await.unwrap();
3738 let c7t = c7c.iter().map(|d| d.transaction).collect::<Vec<_>>();
3739 assert_eq!(c7t, vec![d(5), d(6)]);
3740 assert_eq!(c7s.previous_digest, Some(c6s.digest()));
3741 assert_eq!(c7s.sequence_number, 6);
3742
3743 let c1ss = SignedCheckpointSummary::new(c1s.epoch, c1s, state.secret.deref(), state.name);
3744 let c2ss = SignedCheckpointSummary::new(c2s.epoch, c2s, state.secret.deref(), state.name);
3745
3746 checkpoint_service
3747 .notify_checkpoint_signature(
3748 &epoch_store,
3749 &CheckpointSignatureMessage { summary: c2ss },
3750 )
3751 .unwrap();
3752 checkpoint_service
3753 .notify_checkpoint_signature(
3754 &epoch_store,
3755 &CheckpointSignatureMessage { summary: c1ss },
3756 )
3757 .unwrap();
3758
3759 let c1sc = certified_result.recv().await.unwrap();
3760 let c2sc = certified_result.recv().await.unwrap();
3761 assert_eq!(c1sc.sequence_number, 0);
3762 assert_eq!(c2sc.sequence_number, 1);
3763 }
3764
3765 impl TransactionCacheRead for HashMap<TransactionDigest, TransactionEffects> {
3766 fn notify_read_executed_effects(
3767 &self,
3768 _: &str,
3769 digests: &[TransactionDigest],
3770 ) -> BoxFuture<'_, Vec<TransactionEffects>> {
3771 std::future::ready(
3772 digests
3773 .iter()
3774 .map(|d| self.get(d).expect("effects not found").clone())
3775 .collect(),
3776 )
3777 .boxed()
3778 }
3779
3780 fn notify_read_executed_effects_digests(
3781 &self,
3782 _: &str,
3783 digests: &[TransactionDigest],
3784 ) -> BoxFuture<'_, Vec<TransactionEffectsDigest>> {
3785 std::future::ready(
3786 digests
3787 .iter()
3788 .map(|d| {
3789 self.get(d)
3790 .map(|fx| fx.digest())
3791 .expect("effects not found")
3792 })
3793 .collect(),
3794 )
3795 .boxed()
3796 }
3797
3798 fn multi_get_executed_effects(
3799 &self,
3800 digests: &[TransactionDigest],
3801 ) -> Vec<Option<TransactionEffects>> {
3802 digests.iter().map(|d| self.get(d).cloned()).collect()
3803 }
3804
3805 fn multi_get_transaction_blocks(
3811 &self,
3812 _: &[TransactionDigest],
3813 ) -> Vec<Option<Arc<VerifiedTransaction>>> {
3814 unimplemented!()
3815 }
3816
3817 fn multi_get_executed_effects_digests(
3818 &self,
3819 _: &[TransactionDigest],
3820 ) -> Vec<Option<TransactionEffectsDigest>> {
3821 unimplemented!()
3822 }
3823
3824 fn multi_get_effects(
3825 &self,
3826 _: &[TransactionEffectsDigest],
3827 ) -> Vec<Option<TransactionEffects>> {
3828 unimplemented!()
3829 }
3830
3831 fn multi_get_events(&self, _: &[TransactionDigest]) -> Vec<Option<TransactionEvents>> {
3832 unimplemented!()
3833 }
3834
3835 fn get_mysticeti_fastpath_outputs(
3836 &self,
3837 _: &TransactionDigest,
3838 ) -> Option<Arc<TransactionOutputs>> {
3839 unimplemented!()
3840 }
3841
3842 fn notify_read_fastpath_transaction_outputs<'a>(
3843 &'a self,
3844 _: &'a [TransactionDigest],
3845 ) -> BoxFuture<'a, Vec<Arc<crate::transaction_outputs::TransactionOutputs>>> {
3846 unimplemented!()
3847 }
3848
3849 fn take_accumulator_events(&self, _: &TransactionDigest) -> Option<Vec<AccumulatorEvent>> {
3850 unimplemented!()
3851 }
3852
3853 fn get_unchanged_loaded_runtime_objects(
3854 &self,
3855 _digest: &TransactionDigest,
3856 ) -> Option<Vec<sui_types::storage::ObjectKey>> {
3857 unimplemented!()
3858 }
3859
3860 fn transaction_executed_in_last_epoch(&self, _: &TransactionDigest, _: EpochId) -> bool {
3861 unimplemented!()
3862 }
3863 }
3864
3865 #[async_trait::async_trait]
3866 impl CheckpointOutput for mpsc::Sender<(CheckpointContents, CheckpointSummary)> {
3867 async fn checkpoint_created(
3868 &self,
3869 summary: &CheckpointSummary,
3870 contents: &CheckpointContents,
3871 _epoch_store: &Arc<AuthorityPerEpochStore>,
3872 _checkpoint_store: &Arc<CheckpointStore>,
3873 ) -> SuiResult {
3874 self.try_send((contents.clone(), summary.clone())).unwrap();
3875 Ok(())
3876 }
3877 }
3878
3879 #[async_trait::async_trait]
3880 impl CertifiedCheckpointOutput for mpsc::Sender<CertifiedCheckpointSummary> {
3881 async fn certified_checkpoint_created(
3882 &self,
3883 summary: &CertifiedCheckpointSummary,
3884 ) -> SuiResult {
3885 self.try_send(summary.clone()).unwrap();
3886 Ok(())
3887 }
3888 }
3889
3890 fn p(i: u64, t: Vec<u8>, timestamp_ms: u64) -> PendingCheckpoint {
3891 PendingCheckpoint {
3892 roots: t
3893 .into_iter()
3894 .map(|t| TransactionKey::Digest(d(t)))
3895 .collect(),
3896 details: PendingCheckpointInfo {
3897 timestamp_ms,
3898 last_of_epoch: false,
3899 checkpoint_height: i,
3900 consensus_commit_ref: CommitRef::default(),
3901 rejected_transactions_digest: Digest::default(),
3902 },
3903 }
3904 }
3905
3906 fn d(i: u8) -> TransactionDigest {
3907 let mut bytes: [u8; 32] = Default::default();
3908 bytes[0] = i;
3909 TransactionDigest::new(bytes)
3910 }
3911
3912 fn e(
3913 transaction_digest: TransactionDigest,
3914 dependencies: Vec<TransactionDigest>,
3915 gas_used: GasCostSummary,
3916 ) -> TransactionEffects {
3917 let mut effects = TransactionEffects::default();
3918 *effects.transaction_digest_mut_for_testing() = transaction_digest;
3919 *effects.dependencies_mut_for_testing() = dependencies;
3920 *effects.gas_cost_summary_mut_for_testing() = gas_used;
3921 effects
3922 }
3923
3924 fn commit_cert_for_test(
3925 store: &mut HashMap<TransactionDigest, TransactionEffects>,
3926 state: Arc<AuthorityState>,
3927 digest: TransactionDigest,
3928 dependencies: Vec<TransactionDigest>,
3929 gas_used: GasCostSummary,
3930 ) {
3931 let epoch_store = state.epoch_store_for_testing();
3932 let effects = e(digest, dependencies, gas_used);
3933 store.insert(digest, effects.clone());
3934 epoch_store.insert_executed_in_epoch(&digest);
3935 }
3936}