sui_core/checkpoints/checkpoint_executor/mod.rs

// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

//! CheckpointExecutor is a Node component that executes all checkpoints for the
//! given epoch. It acts as a consumer of newly synced checkpoints from StateSync,
//! scheduling and monitoring their execution. Its primary goal is to allow
//! for catching up to the current checkpoint sequence number of the network
//! as quickly as possible so that a newly joined or recovering Node can
//! participate in a timely manner. To that end, CheckpointExecutor attempts
//! to saturate the CPU with executor tasks (one per checkpoint), each of which
//! handles scheduling and awaiting checkpoint transaction execution.
//!
//! CheckpointExecutor is made recoverable in the event of Node shutdown by way of a watermark,
//! `highest_executed_checkpoint`, which is guaranteed to be updated sequentially,
//! despite checkpoints themselves potentially being executed non-sequentially and in parallel.
//! CheckpointExecutor parallelizes checkpoints of the same epoch as much as possible.
//! CheckpointExecutor enforces the invariant that if `run_epoch` returns `StopReason::EpochComplete`,
//! we have reached the end of the epoch. This allows us to use it as a signal for reconfig.
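//!
//! A minimal sketch of how the end-of-epoch signal might be consumed by a node's
//! reconfiguration loop (illustrative only; names such as `build_executor_for_epoch`
//! and `perform_reconfig` are hypothetical, not part of this crate):
//!
//! ```ignore
//! loop {
//!     let executor = build_executor_for_epoch(&state, &epoch_store);
//!     match executor.run_epoch(run_with_range).await {
//!         // All checkpoints of the epoch executed; safe to reconfigure and start the next epoch.
//!         StopReason::EpochComplete => perform_reconfig(&state).await,
//!         // A RunWithRange bound was hit; stop executing checkpoints entirely.
//!         StopReason::RunWithRangeCondition => break,
//!     }
//! }
//! ```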

use futures::StreamExt;
use mysten_common::{debug_fatal, fatal};
use parking_lot::Mutex;
use std::{sync::Arc, time::Instant};
use sui_types::SUI_ACCUMULATOR_ROOT_OBJECT_ID;
use sui_types::base_types::SequenceNumber;
use sui_types::crypto::RandomnessRound;
use sui_types::inner_temporary_store::PackageStoreWithFallback;
use sui_types::messages_checkpoint::{CheckpointContents, CheckpointSequenceNumber};
use sui_types::transaction::{TransactionDataAPI, TransactionKind};

use sui_config::node::{CheckpointExecutorConfig, RunWithRange};
use sui_macros::fail_point;
use sui_types::effects::{TransactionEffects, TransactionEffectsAPI};
use sui_types::executable_transaction::VerifiedExecutableTransaction;
use sui_types::execution_status::{ExecutionFailureStatus, ExecutionStatus};
use sui_types::full_checkpoint_content::Checkpoint;
use sui_types::global_state_hash::GlobalStateHash;
use sui_types::message_envelope::Message;
use sui_types::{
    base_types::{TransactionDigest, TransactionEffectsDigest},
    messages_checkpoint::VerifiedCheckpoint,
    transaction::VerifiedTransaction,
};
use tap::{TapFallible, TapOptional};
use tracing::{debug, info, instrument, warn};

use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore;
use crate::authority::backpressure::BackpressureManager;
use crate::authority::{AuthorityState, ExecutionEnv};
use crate::execution_scheduler::ExecutionScheduler;
use crate::execution_scheduler::execution_scheduler_impl::BarrierDependencyBuilder;
use crate::global_state_hasher::GlobalStateHasher;
use crate::{
    checkpoints::CheckpointStore,
    execution_cache::{ObjectCacheRead, TransactionCacheRead},
};

mod data_ingestion_handler;
pub mod metrics;
pub(crate) mod utils;

use data_ingestion_handler::{load_checkpoint, store_checkpoint_locally};
use metrics::CheckpointExecutorMetrics;
use utils::*;

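/// Log checkpoint execution progress at INFO level once every this many executed checkpoints.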
const CHECKPOINT_PROGRESS_LOG_COUNT_INTERVAL: u64 = 5000;

#[derive(PartialEq, Eq, Debug)]
pub enum StopReason {
    EpochComplete,
    RunWithRangeCondition,
}

pub(crate) struct CheckpointExecutionData {
    pub checkpoint: VerifiedCheckpoint,
    pub checkpoint_contents: CheckpointContents,
    pub tx_digests: Vec<TransactionDigest>,
    pub fx_digests: Vec<TransactionEffectsDigest>,
}

pub(crate) struct CheckpointTransactionData {
    pub transactions: Vec<VerifiedExecutableTransaction>,
    pub effects: Vec<TransactionEffects>,
    pub executed_fx_digests: Vec<Option<TransactionEffectsDigest>>,
    /// The accumulator versions for the transactions in the checkpoint.
    /// Entries are None only if the accumulator is not enabled (they are either all Some or all None).
    /// This information is needed for object balance withdraw processing.
    /// The vector is 1:1 with the transactions in the checkpoint.
    pub accumulator_versions: Vec<Option<SequenceNumber>>,
}

impl CheckpointTransactionData {
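    /// Derives `accumulator_versions` from the provided effects.
    ///
    /// Worked example (hypothetical values, for illustration only): if the checkpoint contains
    /// transactions [T0, T1, S2, T3, S4], where S2 and S4 are barrier settlement transactions
    /// whose effects record the accumulator root object at input versions 7 and 9 respectively,
    /// then `accumulator_versions` is [Some(7), Some(7), Some(7), Some(9), Some(9)].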
    pub fn new(
        transactions: Vec<VerifiedExecutableTransaction>,
        effects: Vec<TransactionEffects>,
        executed_fx_digests: Vec<Option<TransactionEffectsDigest>>,
    ) -> Self {
        assert_eq!(transactions.len(), effects.len());
        assert_eq!(transactions.len(), executed_fx_digests.len());
        let mut accumulator_versions = vec![None; transactions.len()];
        let mut next_update_index = 0;
        for (idx, efx) in effects.iter().enumerate() {
            // Only barrier settlement transactions mutate the accumulator root object.
            // This filter detects whether this transaction is a barrier settlement transaction,
            // and if so extracts the old (input) version of the accumulator root object.
            // Transactions up to and including the barrier settlement transaction read this accumulator version.
            let acc_version = efx.object_changes().into_iter().find_map(|change| {
                if change.id == SUI_ACCUMULATOR_ROOT_OBJECT_ID {
                    change.input_version
                } else {
                    None
                }
            });
            if let Some(acc_version) = acc_version {
                // Set version for transactions between [next_update_index, idx] inclusive.
                for slot in accumulator_versions
                    .iter_mut()
                    .take(idx + 1)
                    .skip(next_update_index)
                {
                    *slot = Some(acc_version);
                }
                next_update_index = idx + 1;
            }
        }
        // Either the accumulator is not enabled, in which case next_update_index == 0;
        // or the last transaction is the barrier settlement transaction, and next_update_index == transactions.len();
        // or the last transaction is the end of epoch transaction, and next_update_index == transactions.len() - 1.
        assert!(
            next_update_index == 0
                || next_update_index == transactions.len()
                || (next_update_index == transactions.len() - 1
                    && transactions
                        .last()
                        .unwrap()
                        .transaction_data()
                        .is_end_of_epoch_tx())
        );
        Self {
            transactions,
            effects,
            executed_fx_digests,
            accumulator_versions,
        }
    }
}

pub(crate) struct CheckpointExecutionState {
    pub data: CheckpointExecutionData,

    state_hasher: Option<GlobalStateHash>,
    full_data: Option<Checkpoint>,
}

impl CheckpointExecutionState {
    pub fn new(data: CheckpointExecutionData) -> Self {
        Self {
            data,
            state_hasher: None,
            full_data: None,
        }
    }

    pub fn new_with_global_state_hasher(
        data: CheckpointExecutionData,
        hasher: GlobalStateHash,
    ) -> Self {
        Self {
            data,
            state_hasher: Some(hasher),
            full_data: None,
        }
    }
}

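/// Marks the given pipeline stage as finished on the provided `PipelineHandle`.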
macro_rules! finish_stage {
    ($handle:expr, $stage:ident) => {
        $handle.finish_stage(PipelineStage::$stage).await;
    };
}

pub struct CheckpointExecutor {
    epoch_store: Arc<AuthorityPerEpochStore>,
    state: Arc<AuthorityState>,
    // TODO: We should use RocksDbStore in the executor
    // to consolidate DB accesses.
    checkpoint_store: Arc<CheckpointStore>,
    object_cache_reader: Arc<dyn ObjectCacheRead>,
    transaction_cache_reader: Arc<dyn TransactionCacheRead>,
    execution_scheduler: Arc<ExecutionScheduler>,
    global_state_hasher: Arc<GlobalStateHasher>,
    backpressure_manager: Arc<BackpressureManager>,
    config: CheckpointExecutorConfig,
    metrics: Arc<CheckpointExecutorMetrics>,
    tps_estimator: Mutex<TPSEstimator>,
    subscription_service_checkpoint_sender: Option<tokio::sync::mpsc::Sender<Checkpoint>>,
}

impl CheckpointExecutor {
    pub fn new(
        epoch_store: Arc<AuthorityPerEpochStore>,
        checkpoint_store: Arc<CheckpointStore>,
        state: Arc<AuthorityState>,
        global_state_hasher: Arc<GlobalStateHasher>,
        backpressure_manager: Arc<BackpressureManager>,
        config: CheckpointExecutorConfig,
        metrics: Arc<CheckpointExecutorMetrics>,
        subscription_service_checkpoint_sender: Option<tokio::sync::mpsc::Sender<Checkpoint>>,
    ) -> Self {
        Self {
            epoch_store,
            state: state.clone(),
            checkpoint_store,
            object_cache_reader: state.get_object_cache_reader().clone(),
            transaction_cache_reader: state.get_transaction_cache_reader().clone(),
            execution_scheduler: state.execution_scheduler().clone(),
            global_state_hasher,
            backpressure_manager,
            config,
            metrics,
            tps_estimator: Mutex::new(TPSEstimator::default()),
            subscription_service_checkpoint_sender,
        }
    }

    pub fn new_for_tests(
        epoch_store: Arc<AuthorityPerEpochStore>,
        checkpoint_store: Arc<CheckpointStore>,
        state: Arc<AuthorityState>,
        state_hasher: Arc<GlobalStateHasher>,
    ) -> Self {
        Self::new(
            epoch_store,
            checkpoint_store,
            state,
            state_hasher,
            BackpressureManager::new_for_tests(),
            Default::default(),
            CheckpointExecutorMetrics::new_for_tests(),
            None,
        )
    }

    // Gets the next checkpoint to schedule for execution. If the epoch is already
    // completed, returns None.
    fn get_next_to_schedule(&self) -> Option<CheckpointSequenceNumber> {
        // Decide the first checkpoint to schedule for execution.
        // If we haven't executed anything in the past, we schedule checkpoint 0.
        // Otherwise we schedule the one after highest executed.
        let highest_executed = self
            .checkpoint_store
            .get_highest_executed_checkpoint()
            .unwrap();

        if let Some(highest_executed) = &highest_executed
            && self.epoch_store.epoch() == highest_executed.epoch()
            && highest_executed.is_last_checkpoint_of_epoch()
        {
            // We can arrive at this point if we bump the highest_executed_checkpoint watermark, and then
            // crash before completing reconfiguration.
            info!(seq = ?highest_executed.sequence_number, "final checkpoint of epoch has already been executed");
            return None;
        }

        Some(
            highest_executed
                .as_ref()
                .map(|c| c.sequence_number() + 1)
                .unwrap_or_else(|| {
                    // TODO this invariant may no longer hold once we introduce snapshots
                    assert_eq!(self.epoch_store.epoch(), 0);
                    // we need to execute the genesis checkpoint
                    0
                }),
        )
    }

    /// Execute all checkpoints for the current epoch, ensuring that the node has not
    /// forked, and return when finished.
    /// If `run_with_range` is set, execution will stop early.
    #[instrument(level = "error", skip_all, fields(epoch = ?self.epoch_store.epoch()))]
    pub async fn run_epoch(self, run_with_range: Option<RunWithRange>) -> StopReason {
        let _metrics_scope = mysten_metrics::monitored_scope("CheckpointExecutor::run_epoch");
        info!(?run_with_range, "CheckpointExecutor::run_epoch");
        debug!(
            "Checkpoint executor running for epoch {:?}",
            self.epoch_store.epoch(),
        );

        // Check whether we should run this epoch at all, based on the RunWithRange condition.
        // The bound is inclusive of the configured epoch, i.e. RunWithRange::Epoch(N) means we
        // execute epoch N and stop when reaching epoch N + 1.
        if run_with_range.is_some_and(|rwr| rwr.is_epoch_gt(self.epoch_store.epoch())) {
            info!("RunWithRange condition satisfied at {:?}", run_with_range,);
            return StopReason::RunWithRangeCondition;
        };

        self.metrics
            .checkpoint_exec_epoch
            .set(self.epoch_store.epoch() as i64);

        let Some(next_to_schedule) = self.get_next_to_schedule() else {
            return StopReason::EpochComplete;
        };

        let this = Arc::new(self);

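        // The environment variable, if set and parseable, overrides the configured
        // `checkpoint_execution_max_concurrency` (useful for ad-hoc tuning and tests).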
        let concurrency = std::env::var("SUI_CHECKPOINT_EXECUTION_MAX_CONCURRENCY")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(this.config.checkpoint_execution_max_concurrency);

        let pipeline_stages = PipelineStages::new(next_to_schedule, this.metrics.clone());

        let final_checkpoint_executed = stream_synced_checkpoints(
            this.checkpoint_store.clone(),
            next_to_schedule,
            run_with_range.and_then(|rwr| rwr.into_checkpoint_bound()),
        )
        // Checkpoint loading and execution are parallelized across checkpoints.
        .map(|checkpoint| {
            let this = this.clone();
            let pipeline_handle = pipeline_stages.handle(*checkpoint.sequence_number());
            async move {
                let pipeline_handle = pipeline_handle.await;
                tokio::spawn(this.execute_checkpoint(checkpoint, pipeline_handle))
                    .await
                    .unwrap()
            }
        })
        .buffered(concurrency)
        // Take the last value from the stream to determine if we completed the epoch
        .fold(false, |state, is_final_checkpoint| async move {
            assert!(
                !state,
                "fold can't be called again after the final checkpoint"
            );
            is_final_checkpoint
        })
        .await;

        if final_checkpoint_executed {
            StopReason::EpochComplete
        } else {
            StopReason::RunWithRangeCondition
        }
    }
}

impl CheckpointExecutor {
    /// Load all data for a checkpoint, ensure all transactions are executed, and check for forks.
    #[instrument(level = "info", skip_all, fields(seq = ?checkpoint.sequence_number()))]
    async fn execute_checkpoint(
        self: Arc<Self>,
        checkpoint: VerifiedCheckpoint,
        mut pipeline_handle: PipelineHandle,
    ) -> bool /* is final checkpoint */ {
        info!("executing checkpoint");
        let sequence_number = checkpoint.sequence_number;

        checkpoint.report_checkpoint_age(
            &self.metrics.checkpoint_contents_age,
            &self.metrics.checkpoint_contents_age_ms,
        );
        self.backpressure_manager
            .update_highest_certified_checkpoint(sequence_number);

        if checkpoint.is_last_checkpoint_of_epoch() && sequence_number > 0 {
            let _wait_for_previous_checkpoints_guard = mysten_metrics::monitored_scope(
                "CheckpointExecutor::wait_for_previous_checkpoints",
            );

            info!(
                "Reached end of epoch checkpoint, waiting for all previous checkpoints to be executed"
            );
            self.checkpoint_store
                .notify_read_executed_checkpoint(sequence_number - 1)
                .await;
        }

        let _parallel_step_guard =
            mysten_metrics::monitored_scope("CheckpointExecutor::parallel_step");

        // Note: only `execute_transactions_from_synced_checkpoint` has end-of-epoch logic.
        let ckpt_state = if self.state.is_fullnode(&self.epoch_store)
            || checkpoint.is_last_checkpoint_of_epoch()
        {
            self.execute_transactions_from_synced_checkpoint(checkpoint, &mut pipeline_handle)
                .await
        } else {
            self.verify_locally_built_checkpoint(checkpoint, &mut pipeline_handle)
                .await
        };

        let tps = self.tps_estimator.lock().update(
            Instant::now(),
            ckpt_state.data.checkpoint.network_total_transactions,
        );
        self.metrics.checkpoint_exec_sync_tps.set(tps as i64);

        self.backpressure_manager
            .update_highest_executed_checkpoint(*ckpt_state.data.checkpoint.sequence_number());

        let is_final_checkpoint = ckpt_state.data.checkpoint.is_last_checkpoint_of_epoch();

        let seq = ckpt_state.data.checkpoint.sequence_number;

        let batch = self
            .state
            .get_cache_commit()
            .build_db_batch(self.epoch_store.epoch(), &ckpt_state.data.tx_digests);

        finish_stage!(pipeline_handle, BuildDbBatch);

        let mut ckpt_state = tokio::task::spawn_blocking({
            let this = self.clone();
            move || {
                // Commit all transaction effects to disk
                let cache_commit = this.state.get_cache_commit();
                debug!(?seq, "committing checkpoint transactions to disk");
                cache_commit.commit_transaction_outputs(
                    this.epoch_store.epoch(),
                    batch,
                    &ckpt_state.data.tx_digests,
                );
                ckpt_state
            }
        })
        .await
        .unwrap();

        finish_stage!(pipeline_handle, CommitTransactionOutputs);

        self.epoch_store
            .handle_finalized_checkpoint(&ckpt_state.data.checkpoint, &ckpt_state.data.tx_digests)
            .expect("cannot fail");

        let randomness_rounds = self.extract_randomness_rounds(
            &ckpt_state.data.checkpoint,
            &ckpt_state.data.checkpoint_contents,
        );

        // Once the checkpoint is finalized, we know that any randomness contained in this checkpoint
        // has been successfully included in a checkpoint certified by a quorum of validators.
        // (RandomnessManager/RandomnessReporter is only present on validators.)
        if let Some(randomness_reporter) = self.epoch_store.randomness_reporter() {
            for round in randomness_rounds {
                debug!(
                    ?round,
                    "notifying RandomnessReporter that randomness update was executed in checkpoint"
                );
                randomness_reporter
                    .notify_randomness_in_checkpoint(round)
                    .expect("epoch cannot have ended");
            }
        }

        finish_stage!(pipeline_handle, FinalizeCheckpoint);

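        // `full_data` is only populated when checkpoint data consumers are enabled
        // (RPC indexing, local data ingestion, or the subscription service).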
        if let Some(checkpoint_data) = ckpt_state.full_data.take() {
            self.commit_index_updates_and_enqueue_to_subscription_service(checkpoint_data)
                .await;
        }

        finish_stage!(pipeline_handle, UpdateRpcIndex);

        self.global_state_hasher
            .accumulate_running_root(&self.epoch_store, seq, ckpt_state.state_hasher)
            .expect("Failed to accumulate running root");

        if is_final_checkpoint {
            self.checkpoint_store
                .insert_epoch_last_checkpoint(self.epoch_store.epoch(), &ckpt_state.data.checkpoint)
                .expect("Failed to insert epoch last checkpoint");

            self.global_state_hasher
                .accumulate_epoch(self.epoch_store.clone(), seq)
                .expect("Accumulating epoch cannot fail");

            self.checkpoint_store
                .prune_local_summaries()
                .tap_err(|e| debug_fatal!("Failed to prune local summaries: {}", e))
                .ok();
        }

        fail_point!("crash");

        self.bump_highest_executed_checkpoint(&ckpt_state.data.checkpoint);

        finish_stage!(pipeline_handle, BumpHighestExecutedCheckpoint);

        // Important: code that runs after the last pipeline stage has finished may execute out of checkpoint order.

        ckpt_state.data.checkpoint.is_last_checkpoint_of_epoch()
    }

    // On validators, checkpoints have often already been constructed locally, in which
    // case we can skip many steps of the checkpoint execution process.
    #[instrument(level = "info", skip_all)]
    async fn verify_locally_built_checkpoint(
        &self,
        checkpoint: VerifiedCheckpoint,
        pipeline_handle: &mut PipelineHandle,
    ) -> CheckpointExecutionState {
        assert!(
            !checkpoint.is_last_checkpoint_of_epoch(),
            "only fullnode path has end-of-epoch logic"
        );

        let sequence_number = checkpoint.sequence_number;
        let locally_built_checkpoint = self
            .checkpoint_store
            .get_locally_computed_checkpoint(sequence_number)
            .expect("db error");

        let Some(locally_built_checkpoint) = locally_built_checkpoint else {
            // fall back to tx-by-tx execution path if we are catching up.
            return self
                .execute_transactions_from_synced_checkpoint(checkpoint, pipeline_handle)
                .await;
        };

        self.metrics.checkpoint_executor_validator_path.inc();

        // Check for fork
        assert_checkpoint_not_forked(
            &locally_built_checkpoint,
            &checkpoint,
            &self.checkpoint_store,
        );

        // Checkpoint builder triggers accumulation of the checkpoint, so this is guaranteed to finish.
        let state_hasher = {
            let _metrics_scope =
                mysten_metrics::monitored_scope("CheckpointExecutor::notify_read_state_hasher");
            self.epoch_store
                .notify_read_checkpoint_state_hasher(&[sequence_number])
                .await
                .unwrap()
                .pop()
                .unwrap()
        };

        let checkpoint_contents = self
            .checkpoint_store
            .get_checkpoint_contents(&checkpoint.content_digest)
            .expect("db error")
            .expect("checkpoint contents not found");

        let (tx_digests, fx_digests): (Vec<_>, Vec<_>) = checkpoint_contents
            .iter()
            .map(|digests| (digests.transaction, digests.effects))
            .unzip();

        pipeline_handle
            .skip_to(PipelineStage::FinalizeTransactions)
            .await;

        // Currently this code only runs on validators, where this method call does nothing.
        // But in the future, fullnodes may follow the mysticeti dag and build their own checkpoints.
        self.insert_finalized_transactions(&tx_digests, sequence_number);

        pipeline_handle.skip_to(PipelineStage::BuildDbBatch).await;

        CheckpointExecutionState::new_with_global_state_hasher(
            CheckpointExecutionData {
                checkpoint,
                checkpoint_contents,
                tx_digests,
                fx_digests,
            },
            state_hasher,
        )
    }

    #[instrument(level = "info", skip_all)]
    async fn execute_transactions_from_synced_checkpoint(
        &self,
        checkpoint: VerifiedCheckpoint,
        pipeline_handle: &mut PipelineHandle,
    ) -> CheckpointExecutionState {
        let sequence_number = checkpoint.sequence_number;
        let (mut ckpt_state, tx_data, unexecuted_tx_digests) = {
            let _scope =
                mysten_metrics::monitored_scope("CheckpointExecutor::execute_transactions");
            let (ckpt_state, tx_data) = self.load_checkpoint_transactions(checkpoint);
            let unexecuted_tx_digests = self.schedule_transaction_execution(&ckpt_state, &tx_data);
            (ckpt_state, tx_data, unexecuted_tx_digests)
        };

        finish_stage!(pipeline_handle, ExecuteTransactions);

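        // Wait for all transactions scheduled above to finish executing before finalizing
        // the checkpoint.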
        {
            self.transaction_cache_reader
                .notify_read_executed_effects_digests(
                    "CheckpointExecutor::notify_read_executed_effects_digests",
                    &unexecuted_tx_digests,
                )
                .await;
        }

        finish_stage!(pipeline_handle, WaitForTransactions);

        if ckpt_state.data.checkpoint.is_last_checkpoint_of_epoch() {
            self.execute_change_epoch_tx(&tx_data).await;
        }

        let _scope = mysten_metrics::monitored_scope("CheckpointExecutor::finalize_checkpoint");

        if self.state.is_fullnode(&self.epoch_store) {
            self.state.congestion_tracker.process_checkpoint_effects(
                &*self.transaction_cache_reader,
                &ckpt_state.data.checkpoint,
                &tx_data.effects,
            );
        }

        self.insert_finalized_transactions(&ckpt_state.data.tx_digests, sequence_number);

        // The early versions of the hasher (prior to effectsv2) rely on db
        // state, so we must wait until all transactions have been executed
        // before accumulating the checkpoint.
        ckpt_state.state_hasher = Some(
            self.global_state_hasher
                .accumulate_checkpoint(&tx_data.effects, sequence_number, &self.epoch_store)
                .expect("epoch cannot have ended"),
        );

        finish_stage!(pipeline_handle, FinalizeTransactions);

        ckpt_state.full_data = self.process_checkpoint_data(&ckpt_state.data, &tx_data);

        finish_stage!(pipeline_handle, ProcessCheckpointData);

        ckpt_state
    }

    fn checkpoint_data_enabled(&self) -> bool {
        self.subscription_service_checkpoint_sender.is_some()
            || self.state.rpc_index.is_some()
            || self.config.data_ingestion_dir.is_some()
    }

    fn insert_finalized_transactions(
        &self,
        tx_digests: &[TransactionDigest],
        sequence_number: CheckpointSequenceNumber,
    ) {
        self.epoch_store
            .insert_finalized_transactions(tx_digests, sequence_number)
            .expect("failed to insert finalized transactions");

        if self.state.is_fullnode(&self.epoch_store) {
            // TODO remove once we no longer need to support this table for read RPC
            self.state
                .get_checkpoint_cache()
                .deprecated_insert_finalized_transactions(
                    tx_digests,
                    self.epoch_store.epoch(),
                    sequence_number,
                );
        }
    }

    #[instrument(level = "info", skip_all)]
    fn process_checkpoint_data(
        &self,
        ckpt_data: &CheckpointExecutionData,
        tx_data: &CheckpointTransactionData,
    ) -> Option<Checkpoint> {
        if !self.checkpoint_data_enabled() {
            return None;
        }

        let checkpoint = load_checkpoint(
            ckpt_data,
            tx_data,
            self.state.get_object_store(),
            &*self.transaction_cache_reader,
        )
        .expect("failed to load checkpoint data");

        if self.state.rpc_index.is_some() || self.config.data_ingestion_dir.is_some() {
            let checkpoint_data = checkpoint.clone().into();
            // Index the checkpoint. This is done out of order; the result is not written and
            // committed to the DB until later (committing must be done in order).
            if let Some(rpc_index) = &self.state.rpc_index {
                let mut layout_resolver = self.epoch_store.executor().type_layout_resolver(
                    Box::new(PackageStoreWithFallback::new(
                        self.state.get_backing_package_store(),
                        &checkpoint_data,
                    )),
                );

                rpc_index.index_checkpoint(&checkpoint_data, layout_resolver.as_mut());
            }

            if let Some(path) = &self.config.data_ingestion_dir {
                store_checkpoint_locally(path, &checkpoint_data)
                    .expect("failed to store checkpoint locally");
            }
        }

        Some(checkpoint)
    }

    // Load all required transaction and effects data for the checkpoint.
    #[instrument(level = "info", skip_all)]
    fn load_checkpoint_transactions(
        &self,
        checkpoint: VerifiedCheckpoint,
    ) -> (CheckpointExecutionState, CheckpointTransactionData) {
        let seq = checkpoint.sequence_number;
        let epoch = checkpoint.epoch;

        let checkpoint_contents = self
            .checkpoint_store
            .get_checkpoint_contents(&checkpoint.content_digest)
            .expect("db error")
            .expect("checkpoint contents not found");

        // attempt to load full checkpoint contents in bulk
        // Tolerate db error in case of data corruption.
        // We will fall back to loading items one-by-one below in case of error.
        if let Some(full_contents) = self
            .checkpoint_store
            .get_full_checkpoint_contents_by_sequence_number(seq)
            .tap_err(|e| debug_fatal!("Failed to get checkpoint contents from store: {e}"))
            .ok()
            .flatten()
            .tap_some(|_| debug!("loaded full checkpoint contents in bulk for sequence {seq}"))
        {
            let num_txns = full_contents.size();
            let mut tx_digests = Vec::with_capacity(num_txns);
            let mut transactions = Vec::with_capacity(num_txns);
            let mut effects = Vec::with_capacity(num_txns);
            let mut fx_digests = Vec::with_capacity(num_txns);

            full_contents
                .into_iter()
                .zip(checkpoint_contents.iter())
                .for_each(|(execution_data, digests)| {
                    let tx_digest = digests.transaction;
                    let fx_digest = digests.effects;
                    debug_assert_eq!(tx_digest, *execution_data.transaction.digest());
                    debug_assert_eq!(fx_digest, execution_data.effects.digest());

                    tx_digests.push(tx_digest);
                    transactions.push(VerifiedExecutableTransaction::new_from_checkpoint(
                        VerifiedTransaction::new_unchecked(execution_data.transaction),
                        epoch,
                        seq,
                    ));
                    effects.push(execution_data.effects);
                    fx_digests.push(fx_digest);
                });

            let executed_fx_digests = self
                .transaction_cache_reader
                .multi_get_executed_effects_digests(&tx_digests);

            (
                CheckpointExecutionState::new(CheckpointExecutionData {
                    checkpoint,
                    checkpoint_contents,
                    tx_digests,
                    fx_digests,
                }),
                CheckpointTransactionData::new(transactions, effects, executed_fx_digests),
            )
        } else {
            // load items one-by-one
            // TODO: If we used RocksDbStore in the executor instead,
            // all the logic below could be removed.

            let digests = checkpoint_contents.inner();

            let (tx_digests, fx_digests): (Vec<_>, Vec<_>) = digests
                .digests_iter()
                .map(|d| (d.transaction, d.effects))
                .unzip();
            let transactions = self
                .transaction_cache_reader
                .multi_get_transaction_blocks(&tx_digests)
                .into_iter()
                .enumerate()
                .map(|(i, tx)| {
                    let tx = tx
                        .unwrap_or_else(|| fatal!("transaction not found for {:?}", tx_digests[i]));
                    let tx = Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone());
                    VerifiedExecutableTransaction::new_from_checkpoint(tx, epoch, seq)
                })
                .collect();
            let effects = self
                .transaction_cache_reader
                .multi_get_effects(&fx_digests)
                .into_iter()
                .enumerate()
                .map(|(i, effect)| {
                    effect.unwrap_or_else(|| {
                        fatal!("checkpoint effect not found for {:?}", digests[i])
                    })
                })
                .collect();

            let executed_fx_digests = self
                .transaction_cache_reader
                .multi_get_executed_effects_digests(&tx_digests);

            (
                CheckpointExecutionState::new(CheckpointExecutionData {
                    checkpoint,
                    checkpoint_contents,
                    tx_digests,
                    fx_digests,
                }),
                CheckpointTransactionData::new(transactions, effects, executed_fx_digests),
            )
        }
    }

    // Schedule all unexecuted transactions in the checkpoint for execution
    #[instrument(level = "info", skip_all)]
    fn schedule_transaction_execution(
        &self,
        ckpt_state: &CheckpointExecutionState,
        tx_data: &CheckpointTransactionData,
    ) -> Vec<TransactionDigest> {
        let mut barrier_deps_builder = BarrierDependencyBuilder::new();

        // Find unexecuted transactions and their expected effects digests
        let (unexecuted_tx_digests, unexecuted_txns): (Vec<_>, Vec<_>) = itertools::multiunzip(
            itertools::izip!(
                tx_data.transactions.iter(),
                ckpt_state.data.tx_digests.iter(),
                ckpt_state.data.fx_digests.iter(),
                tx_data.effects.iter(),
                tx_data.executed_fx_digests.iter(),
                tx_data.accumulator_versions.iter()
            )
            .filter_map(
                |(
                    txn,
                    tx_digest,
                    expected_fx_digest,
                    effects,
                    executed_fx_digest,
                    accumulator_version,
                )| {
                    let barrier_deps =
                        barrier_deps_builder.process_tx(*tx_digest, txn.transaction_data());

                    if let Some(executed_fx_digest) = executed_fx_digest {
                        assert_not_forked(
                            &ckpt_state.data.checkpoint,
                            tx_digest,
                            expected_fx_digest,
                            executed_fx_digest,
                            &*self.transaction_cache_reader,
                        );
                        None
                    } else if txn.transaction_data().is_end_of_epoch_tx() {
                        None
                    } else {
                        let assigned_versions = self
                            .epoch_store
                            .acquire_shared_version_assignments_from_effects(
                                txn,
                                effects,
                                *accumulator_version,
                                &*self.object_cache_reader,
                            )
                            .expect("failed to acquire shared version assignments");

                        let mut env = ExecutionEnv::new()
                            .with_assigned_versions(assigned_versions)
                            .with_expected_effects_digest(*expected_fx_digest)
                            .with_barrier_dependencies(barrier_deps);

                        // Check if the expected effects indicate insufficient balance
                        if let ExecutionStatus::Failure {
                            error: ExecutionFailureStatus::InsufficientBalanceForWithdraw,
                            ..
                        } = effects.status()
                        {
                            env = env.with_insufficient_balance();
                        }

                        Some((tx_digest, (txn.clone(), env)))
                    }
                },
            ),
        );

        // Enqueue unexecuted transactions with their expected effects digests
        self.execution_scheduler
            .enqueue_transactions(unexecuted_txns, &self.epoch_store);

        unexecuted_tx_digests
    }

    // Execute the change epoch txn
    #[instrument(level = "error", skip_all)]
    async fn execute_change_epoch_tx(&self, tx_data: &CheckpointTransactionData) {
        let change_epoch_tx = tx_data.transactions.last().unwrap();
        let change_epoch_fx = tx_data.effects.last().unwrap();
        assert_eq!(
            change_epoch_tx.digest(),
            change_epoch_fx.transaction_digest()
        );
        assert!(
            change_epoch_tx.transaction_data().is_end_of_epoch_tx(),
            "final txn must be an end of epoch txn"
        );

        // Ordinarily we would assert that the change epoch txn has not been executed yet.
        // However, during crash recovery, it is possible that we already passed this point and
        // the txn has been executed. You can uncomment this assert if you are debugging a problem
        // related to reconfig. If you hit this assert and it is not because of crash-recovery,
        // it may indicate a bug in the checkpoint executor.
        //
        //     if self
        //         .transaction_cache_reader
        //         .get_executed_effects(change_epoch_tx.digest())
        //         .is_some()
        //     {
        //         fatal!(
        //             "end of epoch txn must not have been executed: {:?}",
        //             change_epoch_tx.digest()
        //         );
        //     }

        let assigned_versions = self
            .epoch_store
            .acquire_shared_version_assignments_from_effects(
                change_epoch_tx,
                change_epoch_fx,
                None,
                self.object_cache_reader.as_ref(),
            )
            .expect("Acquiring shared version assignments for change_epoch tx cannot fail");

        info!(
            "scheduling change epoch txn with digest: {:?}, expected effects digest: {:?}, assigned versions: {:?}",
            change_epoch_tx.digest(),
            change_epoch_fx.digest(),
            assigned_versions
        );
        self.execution_scheduler.enqueue_transactions(
            vec![(
                change_epoch_tx.clone(),
                ExecutionEnv::new()
                    .with_assigned_versions(assigned_versions)
                    .with_expected_effects_digest(change_epoch_fx.digest()),
            )],
            &self.epoch_store,
        );

        self.transaction_cache_reader
            .notify_read_executed_effects_digests(
                "CheckpointExecutor::notify_read_advance_epoch_tx",
                &[*change_epoch_tx.digest()],
            )
            .await;
    }

    // Increment the highest executed checkpoint watermark and prune old full-checkpoint contents
    #[instrument(level = "debug", skip_all)]
    fn bump_highest_executed_checkpoint(&self, checkpoint: &VerifiedCheckpoint) {
        // Ensure that we are not skipping checkpoints at any point
        let seq = *checkpoint.sequence_number();
        debug!("Bumping highest_executed_checkpoint watermark to {seq:?}");
        if let Some(prev_highest) = self
            .checkpoint_store
            .get_highest_executed_checkpoint_seq_number()
            .unwrap()
        {
            assert_eq!(prev_highest + 1, seq);
        } else {
            assert_eq!(seq, 0);
        }
        if seq.is_multiple_of(CHECKPOINT_PROGRESS_LOG_COUNT_INTERVAL) {
            info!("Finished syncing and executing checkpoint {}", seq);
        }

        fail_point!("highest-executed-checkpoint");

        // We store a fixed number of additional FullCheckpointContents after execution is complete
        // for use in state sync.
        const NUM_SAVED_FULL_CHECKPOINT_CONTENTS: u64 = 5_000;
        if seq >= NUM_SAVED_FULL_CHECKPOINT_CONTENTS {
            let prune_seq = seq - NUM_SAVED_FULL_CHECKPOINT_CONTENTS;
            if let Some(prune_checkpoint) = self
                .checkpoint_store
                .get_checkpoint_by_sequence_number(prune_seq)
                .expect("Failed to fetch checkpoint")
            {
                self.checkpoint_store
                    .delete_full_checkpoint_contents(prune_seq)
                    .expect("Failed to delete full checkpoint contents");
                self.checkpoint_store
                    .delete_contents_digest_sequence_number_mapping(
                        &prune_checkpoint.content_digest,
                    )
                    .expect("Failed to delete contents digest -> sequence number mapping");
            } else {
                // If this is directly after a snapshot restore with skiplisting,
                // this is expected for the first `NUM_SAVED_FULL_CHECKPOINT_CONTENTS`
                // checkpoints.
                debug!(
                    "Failed to fetch checkpoint with sequence number {:?}",
                    prune_seq
                );
            }
        }

        self.checkpoint_store
            .update_highest_executed_checkpoint(checkpoint)
            .unwrap();
        self.metrics.last_executed_checkpoint.set(seq as i64);

        self.metrics
            .last_executed_checkpoint_timestamp_ms
            .set(checkpoint.timestamp_ms as i64);
        checkpoint.report_checkpoint_age(
            &self.metrics.last_executed_checkpoint_age,
            &self.metrics.last_executed_checkpoint_age_ms,
        );
    }

    /// If configured, commit the pending index updates for the provided checkpoint and
    /// enqueue the checkpoint to the subscription service.
    #[instrument(level = "info", skip_all)]
    async fn commit_index_updates_and_enqueue_to_subscription_service(
        &self,
        checkpoint: Checkpoint,
    ) {
        if let Some(rpc_index) = &self.state.rpc_index {
            rpc_index
                .commit_update_for_checkpoint(checkpoint.summary.sequence_number)
                .expect("failed to update rpc_indexes");
        }

        if let Some(sender) = &self.subscription_service_checkpoint_sender
            && let Err(e) = sender.send(checkpoint).await
        {
            warn!("unable to send checkpoint to subscription service: {e}");
        }
    }

    // Extract randomness rounds from the checkpoint version-specific data (if available).
    // Otherwise, extract randomness rounds from the first transaction in the checkpoint
    #[instrument(level = "debug", skip_all)]
    fn extract_randomness_rounds(
        &self,
        checkpoint: &VerifiedCheckpoint,
        checkpoint_contents: &CheckpointContents,
    ) -> Vec<RandomnessRound> {
        if let Some(version_specific_data) = checkpoint
            .version_specific_data(self.epoch_store.protocol_config())
            .expect("unable to get version_specific_data")
        {
            // With version-specific data, randomness rounds are stored in checkpoint summary.
            version_specific_data.into_v1().randomness_rounds
        } else {
            // Before version-specific data, checkpoint batching must be disabled. In this case,
            // randomness state update tx must be first if it exists, because all other
            // transactions in a checkpoint that includes a randomness state update are causally
            // dependent on it.
            assert_eq!(
                0,
                self.epoch_store
                    .protocol_config()
                    .min_checkpoint_interval_ms_as_option()
                    .unwrap_or_default(),
            );
            if let Some(first_digest) = checkpoint_contents.inner().first_digests() {
                let maybe_randomness_tx = self.transaction_cache_reader.get_transaction_block(&first_digest.transaction)
                .unwrap_or_else(||
                    fatal!(
                        "state-sync should have ensured that transaction with digests {first_digest:?} exists for checkpoint: {}",
                        checkpoint.sequence_number()
                    )
                );
                if let TransactionKind::RandomnessStateUpdate(rsu) =
                    maybe_randomness_tx.data().transaction_data().kind()
                {
                    vec![rsu.randomness_round]
                } else {
                    Vec::new()
                }
            } else {
                Vec::new()
            }
        }
    }
}