use futures::StreamExt;
use mysten_common::{debug_fatal, fatal};
use parking_lot::Mutex;
use std::{sync::Arc, time::Instant};
use sui_types::SUI_ACCUMULATOR_ROOT_OBJECT_ID;
use sui_types::base_types::SequenceNumber;
use sui_types::crypto::RandomnessRound;
use sui_types::inner_temporary_store::PackageStoreWithFallback;
use sui_types::messages_checkpoint::{CheckpointContents, CheckpointSequenceNumber};
use sui_types::transaction::{TransactionDataAPI, TransactionKind};

use sui_config::node::{CheckpointExecutorConfig, RunWithRange};
use sui_macros::fail_point;
use sui_types::effects::{TransactionEffects, TransactionEffectsAPI};
use sui_types::executable_transaction::VerifiedExecutableTransaction;
use sui_types::execution_status::{ExecutionFailureStatus, ExecutionStatus};
use sui_types::full_checkpoint_content::Checkpoint;
use sui_types::global_state_hash::GlobalStateHash;
use sui_types::message_envelope::Message;
use sui_types::{
    base_types::{TransactionDigest, TransactionEffectsDigest},
    messages_checkpoint::VerifiedCheckpoint,
    transaction::VerifiedTransaction,
};
use tap::{TapFallible, TapOptional};
use tracing::{debug, info, instrument, warn};

use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore;
use crate::authority::backpressure::BackpressureManager;
use crate::authority::{AuthorityState, ExecutionEnv};
use crate::execution_scheduler::ExecutionScheduler;
use crate::execution_scheduler::execution_scheduler_impl::BarrierDependencyBuilder;
use crate::global_state_hasher::GlobalStateHasher;
use crate::{
    checkpoints::CheckpointStore,
    execution_cache::{ObjectCacheRead, TransactionCacheRead},
};

mod data_ingestion_handler;
pub mod metrics;
pub(crate) mod utils;

use data_ingestion_handler::{load_checkpoint, store_checkpoint_locally};
use metrics::CheckpointExecutorMetrics;
use utils::*;

const CHECKPOINT_PROGRESS_LOG_COUNT_INTERVAL: u64 = 5000;

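/// Why `run_epoch` stopped: the epoch's final checkpoint was executed, or the
/// configured `RunWithRange` bound was reached first.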
#[derive(PartialEq, Eq, Debug)]
pub enum StopReason {
    EpochComplete,
    RunWithRangeCondition,
}

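/// Immutable data for a checkpoint being executed: the verified summary, its contents,
/// and the transaction and effects digests in checkpoint order.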
pub(crate) struct CheckpointExecutionData {
    pub checkpoint: VerifiedCheckpoint,
    pub checkpoint_contents: CheckpointContents,
    pub tx_digests: Vec<TransactionDigest>,
    pub fx_digests: Vec<TransactionEffectsDigest>,
}

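/// Per-transaction data for a checkpoint. `accumulator_versions[i]` holds the input
/// version of the accumulator root object taken from the effects of the next
/// transaction (at index >= i) that touches it; it is computed in `new` and later fed
/// into shared-version assignment.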
pub(crate) struct CheckpointTransactionData {
    pub transactions: Vec<VerifiedExecutableTransaction>,
    pub effects: Vec<TransactionEffects>,
    pub executed_fx_digests: Vec<Option<TransactionEffectsDigest>>,
    pub accumulator_versions: Vec<Option<SequenceNumber>>,
}

impl CheckpointTransactionData {
    pub fn new(
        transactions: Vec<VerifiedExecutableTransaction>,
        effects: Vec<TransactionEffects>,
        executed_fx_digests: Vec<Option<TransactionEffectsDigest>>,
    ) -> Self {
        assert_eq!(transactions.len(), effects.len());
        assert_eq!(transactions.len(), executed_fx_digests.len());
        let mut accumulator_versions = vec![None; transactions.len()];
        let mut next_update_index = 0;
        for (idx, efx) in effects.iter().enumerate() {
            let acc_version = efx.object_changes().into_iter().find_map(|change| {
                if change.id == SUI_ACCUMULATOR_ROOT_OBJECT_ID {
                    change.input_version
                } else {
                    None
                }
            });
            if let Some(acc_version) = acc_version {
                for slot in accumulator_versions
                    .iter_mut()
                    .take(idx + 1)
                    .skip(next_update_index)
                {
                    *slot = Some(acc_version);
                }
                next_update_index = idx + 1;
            }
        }
        assert!(
            next_update_index == 0
                || next_update_index == transactions.len()
                || (next_update_index == transactions.len() - 1
                    && transactions
                        .last()
                        .unwrap()
                        .transaction_data()
                        .is_end_of_epoch_tx())
        );
        Self {
            transactions,
            effects,
            executed_fx_digests,
            accumulator_versions,
        }
    }
}
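
/// Mutable state carried for a single checkpoint through the execution pipeline.
/// `state_hasher` and `full_data` are filled in by whichever path produces them.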
pub(crate) struct CheckpointExecutionState {
    pub data: CheckpointExecutionData,

    state_hasher: Option<GlobalStateHash>,
    full_data: Option<Checkpoint>,
}

impl CheckpointExecutionState {
    pub fn new(data: CheckpointExecutionData) -> Self {
        Self {
            data,
            state_hasher: None,
            full_data: None,
        }
    }

    pub fn new_with_global_state_hasher(
        data: CheckpointExecutionData,
        hasher: GlobalStateHash,
    ) -> Self {
        Self {
            data,
            state_hasher: Some(hasher),
            full_data: None,
        }
    }
}

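/// Shorthand for awaiting `PipelineHandle::finish_stage` on the named `PipelineStage`.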
macro_rules! finish_stage {
    ($handle:expr, $stage:ident) => {
        $handle.finish_stage(PipelineStage::$stage).await;
    };
}

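/// Executes checkpoints for a single epoch: schedules checkpoint transactions, waits
/// for their effects, commits outputs, updates indexes and watermarks, and accumulates
/// state hashes. Consumed by `run_epoch`.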
pub struct CheckpointExecutor {
    epoch_store: Arc<AuthorityPerEpochStore>,
    state: Arc<AuthorityState>,
    checkpoint_store: Arc<CheckpointStore>,
    object_cache_reader: Arc<dyn ObjectCacheRead>,
    transaction_cache_reader: Arc<dyn TransactionCacheRead>,
    execution_scheduler: Arc<ExecutionScheduler>,
    global_state_hasher: Arc<GlobalStateHasher>,
    backpressure_manager: Arc<BackpressureManager>,
    config: CheckpointExecutorConfig,
    metrics: Arc<CheckpointExecutorMetrics>,
    tps_estimator: Mutex<TPSEstimator>,
    subscription_service_checkpoint_sender: Option<tokio::sync::mpsc::Sender<Checkpoint>>,
}

impl CheckpointExecutor {
    pub fn new(
        epoch_store: Arc<AuthorityPerEpochStore>,
        checkpoint_store: Arc<CheckpointStore>,
        state: Arc<AuthorityState>,
        global_state_hasher: Arc<GlobalStateHasher>,
        backpressure_manager: Arc<BackpressureManager>,
        config: CheckpointExecutorConfig,
        metrics: Arc<CheckpointExecutorMetrics>,
        subscription_service_checkpoint_sender: Option<tokio::sync::mpsc::Sender<Checkpoint>>,
    ) -> Self {
        Self {
            epoch_store,
            state: state.clone(),
            checkpoint_store,
            object_cache_reader: state.get_object_cache_reader().clone(),
            transaction_cache_reader: state.get_transaction_cache_reader().clone(),
            execution_scheduler: state.execution_scheduler().clone(),
            global_state_hasher,
            backpressure_manager,
            config,
            metrics,
            tps_estimator: Mutex::new(TPSEstimator::default()),
            subscription_service_checkpoint_sender,
        }
    }

    pub fn new_for_tests(
        epoch_store: Arc<AuthorityPerEpochStore>,
        checkpoint_store: Arc<CheckpointStore>,
        state: Arc<AuthorityState>,
        state_hasher: Arc<GlobalStateHasher>,
    ) -> Self {
        Self::new(
            epoch_store,
            checkpoint_store,
            state,
            state_hasher,
            BackpressureManager::new_for_tests(),
            Default::default(),
            CheckpointExecutorMetrics::new_for_tests(),
            None,
        )
    }

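    /// Returns the next checkpoint sequence number to schedule, or `None` if the final
    /// checkpoint of the current epoch has already been executed.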
    fn get_next_to_schedule(&self) -> Option<CheckpointSequenceNumber> {
        let highest_executed = self
            .checkpoint_store
            .get_highest_executed_checkpoint()
            .unwrap();

        if let Some(highest_executed) = &highest_executed
            && self.epoch_store.epoch() == highest_executed.epoch()
            && highest_executed.is_last_checkpoint_of_epoch()
        {
            info!(seq = ?highest_executed.sequence_number, "final checkpoint of epoch has already been executed");
            return None;
        }

        Some(
            highest_executed
                .as_ref()
                .map(|c| c.sequence_number() + 1)
                .unwrap_or_else(|| {
                    assert_eq!(self.epoch_store.epoch(), 0);
                    0
                }),
        )
    }

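    /// Streams synced checkpoints starting from `get_next_to_schedule` and executes
    /// them with bounded concurrency (overridable via the
    /// `SUI_CHECKPOINT_EXECUTION_MAX_CONCURRENCY` env var). Each checkpoint gets a
    /// `PipelineStages` handle keyed by its sequence number. Returns
    /// `StopReason::EpochComplete` once the epoch's final checkpoint has been executed,
    /// or `StopReason::RunWithRangeCondition` if the optional range bound stops the
    /// stream first.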
    #[instrument(level = "error", skip_all, fields(epoch = ?self.epoch_store.epoch()))]
    pub async fn run_epoch(self, run_with_range: Option<RunWithRange>) -> StopReason {
        let _metrics_scope = mysten_metrics::monitored_scope("CheckpointExecutor::run_epoch");
        info!(?run_with_range, "CheckpointExecutor::run_epoch");
        debug!(
            "Checkpoint executor running for epoch {:?}",
            self.epoch_store.epoch(),
        );

        if run_with_range.is_some_and(|rwr| rwr.is_epoch_gt(self.epoch_store.epoch())) {
            info!("RunWithRange condition satisfied at {:?}", run_with_range);
            return StopReason::RunWithRangeCondition;
        };

        self.metrics
            .checkpoint_exec_epoch
            .set(self.epoch_store.epoch() as i64);

        let Some(next_to_schedule) = self.get_next_to_schedule() else {
            return StopReason::EpochComplete;
        };

        let this = Arc::new(self);

        let concurrency = std::env::var("SUI_CHECKPOINT_EXECUTION_MAX_CONCURRENCY")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(this.config.checkpoint_execution_max_concurrency);

        let pipeline_stages = PipelineStages::new(next_to_schedule, this.metrics.clone());

        let final_checkpoint_executed = stream_synced_checkpoints(
            this.checkpoint_store.clone(),
            next_to_schedule,
            run_with_range.and_then(|rwr| rwr.into_checkpoint_bound()),
        )
        .map(|checkpoint| {
            let this = this.clone();
            let pipeline_handle = pipeline_stages.handle(*checkpoint.sequence_number());
            async move {
                let pipeline_handle = pipeline_handle.await;
                tokio::spawn(this.execute_checkpoint(checkpoint, pipeline_handle))
                    .await
                    .unwrap()
            }
        })
        .buffered(concurrency)
        .fold(false, |state, is_final_checkpoint| async move {
            assert!(
                !state,
                "fold can't be called again after the final checkpoint"
            );
            is_final_checkpoint
        })
        .await;

        if final_checkpoint_executed {
            StopReason::EpochComplete
        } else {
            StopReason::RunWithRangeCondition
        }
    }
}

impl CheckpointExecutor {
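    /// Executes a single checkpoint end to end and returns `true` if it was the final
    /// checkpoint of the epoch. Fullnodes (and the end-of-epoch checkpoint) execute the
    /// synced transactions; validators verify against the locally built checkpoint when
    /// one exists. The outputs are then committed, the checkpoint is finalized, indexes
    /// are updated, and the highest-executed watermark is bumped.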
    #[instrument(level = "info", skip_all, fields(seq = ?checkpoint.sequence_number()))]
    async fn execute_checkpoint(
        self: Arc<Self>,
        checkpoint: VerifiedCheckpoint,
        mut pipeline_handle: PipelineHandle,
    ) -> bool {
        info!("executing checkpoint");
        let sequence_number = checkpoint.sequence_number;

        checkpoint.report_checkpoint_age(
            &self.metrics.checkpoint_contents_age,
            &self.metrics.checkpoint_contents_age_ms,
        );
        self.backpressure_manager
            .update_highest_certified_checkpoint(sequence_number);

        if checkpoint.is_last_checkpoint_of_epoch() && sequence_number > 0 {
            let _wait_for_previous_checkpoints_guard = mysten_metrics::monitored_scope(
                "CheckpointExecutor::wait_for_previous_checkpoints",
            );

            info!(
                "Reached end of epoch checkpoint, waiting for all previous checkpoints to be executed"
            );
            self.checkpoint_store
                .notify_read_executed_checkpoint(sequence_number - 1)
                .await;
        }

        let _parallel_step_guard =
            mysten_metrics::monitored_scope("CheckpointExecutor::parallel_step");

        let ckpt_state = if self.state.is_fullnode(&self.epoch_store)
            || checkpoint.is_last_checkpoint_of_epoch()
        {
            self.execute_transactions_from_synced_checkpoint(checkpoint, &mut pipeline_handle)
                .await
        } else {
            self.verify_locally_built_checkpoint(checkpoint, &mut pipeline_handle)
                .await
        };

        let tps = self.tps_estimator.lock().update(
            Instant::now(),
            ckpt_state.data.checkpoint.network_total_transactions,
        );
        self.metrics.checkpoint_exec_sync_tps.set(tps as i64);

        self.backpressure_manager
            .update_highest_executed_checkpoint(*ckpt_state.data.checkpoint.sequence_number());

        let is_final_checkpoint = ckpt_state.data.checkpoint.is_last_checkpoint_of_epoch();

        let seq = ckpt_state.data.checkpoint.sequence_number;

        let batch = self
            .state
            .get_cache_commit()
            .build_db_batch(self.epoch_store.epoch(), &ckpt_state.data.tx_digests);

        finish_stage!(pipeline_handle, BuildDbBatch);

        let object_funds_checker = self.state.object_funds_checker.load();
        if let Some(object_funds_checker) = object_funds_checker.as_ref() {
            object_funds_checker.commit_effects(batch.0.iter().map(|o| &o.effects));
        }

        let mut ckpt_state = tokio::task::spawn_blocking({
            let this = self.clone();
            move || {
                let cache_commit = this.state.get_cache_commit();
                debug!(?seq, "committing checkpoint transactions to disk");
                cache_commit.commit_transaction_outputs(
                    this.epoch_store.epoch(),
                    batch,
                    &ckpt_state.data.tx_digests,
                );
                ckpt_state
            }
        })
        .await
        .unwrap();

        finish_stage!(pipeline_handle, CommitTransactionOutputs);

        self.epoch_store
            .handle_finalized_checkpoint(&ckpt_state.data.checkpoint, &ckpt_state.data.tx_digests)
            .expect("cannot fail");

        let randomness_rounds = self.extract_randomness_rounds(
            &ckpt_state.data.checkpoint,
            &ckpt_state.data.checkpoint_contents,
        );

        if let Some(randomness_reporter) = self.epoch_store.randomness_reporter() {
            for round in randomness_rounds {
                debug!(
                    ?round,
                    "notifying RandomnessReporter that randomness update was executed in checkpoint"
                );
                randomness_reporter
                    .notify_randomness_in_checkpoint(round)
                    .expect("epoch cannot have ended");
            }
        }

        finish_stage!(pipeline_handle, FinalizeCheckpoint);

        if let Some(checkpoint_data) = ckpt_state.full_data.take() {
            self.commit_index_updates_and_enqueue_to_subscription_service(checkpoint_data)
                .await;
        }

        finish_stage!(pipeline_handle, UpdateRpcIndex);

        self.global_state_hasher
            .accumulate_running_root(&self.epoch_store, seq, ckpt_state.state_hasher)
            .expect("Failed to accumulate running root");

        if is_final_checkpoint {
            self.checkpoint_store
                .insert_epoch_last_checkpoint(self.epoch_store.epoch(), &ckpt_state.data.checkpoint)
                .expect("Failed to insert epoch last checkpoint");

            self.global_state_hasher
                .accumulate_epoch(self.epoch_store.clone(), seq)
                .expect("Accumulating epoch cannot fail");

            self.checkpoint_store
                .prune_local_summaries()
                .tap_err(|e| debug_fatal!("Failed to prune local summaries: {}", e))
                .ok();
        }

        fail_point!("crash");

        self.bump_highest_executed_checkpoint(&ckpt_state.data.checkpoint);

        finish_stage!(pipeline_handle, BumpHighestExecutedCheckpoint);

        ckpt_state.data.checkpoint.is_last_checkpoint_of_epoch()
    }

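    /// Validator path: the checkpoint's transactions were already executed when the
    /// checkpoint was built locally, so this only checks the synced checkpoint against
    /// the locally computed one (panicking on a fork) and finalizes the transactions.
    /// Falls back to the fullnode path if no locally computed checkpoint is found.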
    #[instrument(level = "info", skip_all)]
    async fn verify_locally_built_checkpoint(
        &self,
        checkpoint: VerifiedCheckpoint,
        pipeline_handle: &mut PipelineHandle,
    ) -> CheckpointExecutionState {
        assert!(
            !checkpoint.is_last_checkpoint_of_epoch(),
            "only fullnode path has end-of-epoch logic"
        );

        let sequence_number = checkpoint.sequence_number;
        let locally_built_checkpoint = self
            .checkpoint_store
            .get_locally_computed_checkpoint(sequence_number)
            .expect("db error");

        let Some(locally_built_checkpoint) = locally_built_checkpoint else {
            return self
                .execute_transactions_from_synced_checkpoint(checkpoint, pipeline_handle)
                .await;
        };

        self.metrics.checkpoint_executor_validator_path.inc();

        assert_checkpoint_not_forked(
            &locally_built_checkpoint,
            &checkpoint,
            &self.checkpoint_store,
        );

        let state_hasher = {
            let _metrics_scope =
                mysten_metrics::monitored_scope("CheckpointExecutor::notify_read_state_hasher");
            self.epoch_store
                .notify_read_checkpoint_state_hasher(&[sequence_number])
                .await
                .unwrap()
                .pop()
                .unwrap()
        };

        let checkpoint_contents = self
            .checkpoint_store
            .get_checkpoint_contents(&checkpoint.content_digest)
            .expect("db error")
            .expect("checkpoint contents not found");

        let (tx_digests, fx_digests): (Vec<_>, Vec<_>) = checkpoint_contents
            .iter()
            .map(|digests| (digests.transaction, digests.effects))
            .unzip();

        pipeline_handle
            .skip_to(PipelineStage::FinalizeTransactions)
            .await;

        self.insert_finalized_transactions(&tx_digests, sequence_number);

        pipeline_handle.skip_to(PipelineStage::BuildDbBatch).await;

        CheckpointExecutionState::new_with_global_state_hasher(
            CheckpointExecutionData {
                checkpoint,
                checkpoint_contents,
                tx_digests,
                fx_digests,
            },
            state_hasher,
        )
    }

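    /// Fullnode path: loads the checkpoint's transactions and effects, schedules the
    /// ones that have not been executed yet, waits for their effects, finalizes them,
    /// and accumulates the checkpoint state hash. For the last checkpoint of the epoch,
    /// the change-epoch transaction is executed separately once everything else has
    /// finished.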
    #[instrument(level = "info", skip_all)]
    async fn execute_transactions_from_synced_checkpoint(
        &self,
        checkpoint: VerifiedCheckpoint,
        pipeline_handle: &mut PipelineHandle,
    ) -> CheckpointExecutionState {
        let sequence_number = checkpoint.sequence_number;
        let (mut ckpt_state, tx_data, unexecuted_tx_digests) = {
            let _scope =
                mysten_metrics::monitored_scope("CheckpointExecutor::execute_transactions");
            let (ckpt_state, tx_data) = self.load_checkpoint_transactions(checkpoint);
            let unexecuted_tx_digests = self.schedule_transaction_execution(&ckpt_state, &tx_data);
            (ckpt_state, tx_data, unexecuted_tx_digests)
        };

        finish_stage!(pipeline_handle, ExecuteTransactions);

        {
            self.transaction_cache_reader
                .notify_read_executed_effects_digests(
                    "CheckpointExecutor::notify_read_executed_effects_digests",
                    &unexecuted_tx_digests,
                )
                .await;
        }

        finish_stage!(pipeline_handle, WaitForTransactions);

        if ckpt_state.data.checkpoint.is_last_checkpoint_of_epoch() {
            self.execute_change_epoch_tx(&tx_data).await;
        }

        let _scope = mysten_metrics::monitored_scope("CheckpointExecutor::finalize_checkpoint");

        if self.state.is_fullnode(&self.epoch_store) {
            self.state.congestion_tracker.process_checkpoint_effects(
                &*self.transaction_cache_reader,
                &ckpt_state.data.checkpoint,
                &tx_data.effects,
            );
        }

        self.insert_finalized_transactions(&ckpt_state.data.tx_digests, sequence_number);

        ckpt_state.state_hasher = Some(
            self.global_state_hasher
                .accumulate_checkpoint(&tx_data.effects, sequence_number, &self.epoch_store)
                .expect("epoch cannot have ended"),
        );

        finish_stage!(pipeline_handle, FinalizeTransactions);

        ckpt_state.full_data = self.process_checkpoint_data(&ckpt_state.data, &tx_data);

        finish_stage!(pipeline_handle, ProcessCheckpointData);

        ckpt_state
    }

    fn checkpoint_data_enabled(&self) -> bool {
        self.subscription_service_checkpoint_sender.is_some()
            || self.state.rpc_index.is_some()
            || self.config.data_ingestion_dir.is_some()
    }

    fn insert_finalized_transactions(
        &self,
        tx_digests: &[TransactionDigest],
        sequence_number: CheckpointSequenceNumber,
    ) {
        self.epoch_store
            .insert_finalized_transactions(tx_digests, sequence_number)
            .expect("failed to insert finalized transactions");

        if self.state.is_fullnode(&self.epoch_store) {
            self.state
                .get_checkpoint_cache()
                .deprecated_insert_finalized_transactions(
                    tx_digests,
                    self.epoch_store.epoch(),
                    sequence_number,
                );
        }
    }

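    /// Builds the full `Checkpoint` data when any consumer (subscription service, RPC
    /// index, or data ingestion directory) is configured, indexing and/or persisting it
    /// along the way. Returns `None` if no consumer needs it.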
    #[instrument(level = "info", skip_all)]
    fn process_checkpoint_data(
        &self,
        ckpt_data: &CheckpointExecutionData,
        tx_data: &CheckpointTransactionData,
    ) -> Option<Checkpoint> {
        if !self.checkpoint_data_enabled() {
            return None;
        }

        let checkpoint = load_checkpoint(
            ckpt_data,
            tx_data,
            self.state.get_object_store(),
            &*self.transaction_cache_reader,
        )
        .expect("failed to load checkpoint data");

        if self.state.rpc_index.is_some() || self.config.data_ingestion_dir.is_some() {
            let checkpoint_data = checkpoint.clone().into();
            if let Some(rpc_index) = &self.state.rpc_index {
                let mut layout_resolver = self.epoch_store.executor().type_layout_resolver(
                    Box::new(PackageStoreWithFallback::new(
                        self.state.get_backing_package_store(),
                        &checkpoint_data,
                    )),
                );

                rpc_index.index_checkpoint(&checkpoint_data, layout_resolver.as_mut());
            }

            if let Some(path) = &self.config.data_ingestion_dir {
                store_checkpoint_locally(path, &checkpoint_data)
                    .expect("failed to store checkpoint locally");
            }
        }

        Some(checkpoint)
    }

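    /// Loads the checkpoint's transactions and effects, preferring the bulk full
    /// checkpoint contents when available and otherwise falling back to per-digest
    /// lookups through the transaction cache.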
    #[instrument(level = "info", skip_all)]
    fn load_checkpoint_transactions(
        &self,
        checkpoint: VerifiedCheckpoint,
    ) -> (CheckpointExecutionState, CheckpointTransactionData) {
        let seq = checkpoint.sequence_number;
        let epoch = checkpoint.epoch;

        let checkpoint_contents = self
            .checkpoint_store
            .get_checkpoint_contents(&checkpoint.content_digest)
            .expect("db error")
            .expect("checkpoint contents not found");

        if let Some(full_contents) = self
            .checkpoint_store
            .get_full_checkpoint_contents_by_sequence_number(seq)
            .tap_err(|e| debug_fatal!("Failed to get checkpoint contents from store: {e}"))
            .ok()
            .flatten()
            .tap_some(|_| debug!("loaded full checkpoint contents in bulk for sequence {seq}"))
        {
            let num_txns = full_contents.size();
            let mut tx_digests = Vec::with_capacity(num_txns);
            let mut transactions = Vec::with_capacity(num_txns);
            let mut effects = Vec::with_capacity(num_txns);
            let mut fx_digests = Vec::with_capacity(num_txns);

            full_contents
                .into_iter()
                .zip(checkpoint_contents.iter())
                .for_each(|(execution_data, digests)| {
                    let tx_digest = digests.transaction;
                    let fx_digest = digests.effects;
                    debug_assert_eq!(tx_digest, *execution_data.transaction.digest());
                    debug_assert_eq!(fx_digest, execution_data.effects.digest());

                    tx_digests.push(tx_digest);
                    transactions.push(VerifiedExecutableTransaction::new_from_checkpoint(
                        VerifiedTransaction::new_unchecked(execution_data.transaction),
                        epoch,
                        seq,
                    ));
                    effects.push(execution_data.effects);
                    fx_digests.push(fx_digest);
                });

            let executed_fx_digests = self
                .transaction_cache_reader
                .multi_get_executed_effects_digests(&tx_digests);

            (
                CheckpointExecutionState::new(CheckpointExecutionData {
                    checkpoint,
                    checkpoint_contents,
                    tx_digests,
                    fx_digests,
                }),
                CheckpointTransactionData::new(transactions, effects, executed_fx_digests),
            )
        } else {
            let digests = checkpoint_contents.inner();

            let (tx_digests, fx_digests): (Vec<_>, Vec<_>) = digests
                .digests_iter()
                .map(|d| (d.transaction, d.effects))
                .unzip();
            let transactions = self
                .transaction_cache_reader
                .multi_get_transaction_blocks(&tx_digests)
                .into_iter()
                .enumerate()
                .map(|(i, tx)| {
                    let tx = tx
                        .unwrap_or_else(|| fatal!("transaction not found for {:?}", tx_digests[i]));
                    let tx = Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone());
                    VerifiedExecutableTransaction::new_from_checkpoint(tx, epoch, seq)
                })
                .collect();
            let effects = self
                .transaction_cache_reader
                .multi_get_effects(&fx_digests)
                .into_iter()
                .enumerate()
                .map(|(i, effect)| {
                    effect.unwrap_or_else(|| {
                        fatal!("checkpoint effect not found for {:?}", digests[i])
                    })
                })
                .collect();

            let executed_fx_digests = self
                .transaction_cache_reader
                .multi_get_executed_effects_digests(&tx_digests);

            (
                CheckpointExecutionState::new(CheckpointExecutionData {
                    checkpoint,
                    checkpoint_contents,
                    tx_digests,
                    fx_digests,
                }),
                CheckpointTransactionData::new(transactions, effects, executed_fx_digests),
            )
        }
    }

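    /// Enqueues the checkpoint's not-yet-executed transactions (excluding the
    /// end-of-epoch transaction) with the execution scheduler, pinning their expected
    /// effects digests and assigned shared versions, and returns the digests whose
    /// effects still need to be awaited. Transactions that already have executed
    /// effects are only checked against the checkpoint to detect forks.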
    #[instrument(level = "info", skip_all)]
    fn schedule_transaction_execution(
        &self,
        ckpt_state: &CheckpointExecutionState,
        tx_data: &CheckpointTransactionData,
    ) -> Vec<TransactionDigest> {
        let mut barrier_deps_builder = BarrierDependencyBuilder::new();

        let (unexecuted_tx_digests, unexecuted_txns): (Vec<_>, Vec<_>) = itertools::multiunzip(
            itertools::izip!(
                tx_data.transactions.iter(),
                ckpt_state.data.tx_digests.iter(),
                ckpt_state.data.fx_digests.iter(),
                tx_data.effects.iter(),
                tx_data.executed_fx_digests.iter(),
                tx_data.accumulator_versions.iter()
            )
            .filter_map(
                |(
                    txn,
                    tx_digest,
                    expected_fx_digest,
                    effects,
                    executed_fx_digest,
                    accumulator_version,
                )| {
                    let barrier_deps =
                        barrier_deps_builder.process_tx(*tx_digest, txn.transaction_data());

                    if let Some(executed_fx_digest) = executed_fx_digest {
                        assert_not_forked(
                            &ckpt_state.data.checkpoint,
                            tx_digest,
                            expected_fx_digest,
                            executed_fx_digest,
                            &*self.transaction_cache_reader,
                        );
                        None
                    } else if txn.transaction_data().is_end_of_epoch_tx() {
                        None
                    } else {
                        let assigned_versions = self
                            .epoch_store
                            .acquire_shared_version_assignments_from_effects(
                                txn,
                                effects,
                                *accumulator_version,
                                &*self.object_cache_reader,
                            )
                            .expect("failed to acquire shared version assignments");

                        let mut env = ExecutionEnv::new()
                            .with_assigned_versions(assigned_versions)
                            .with_expected_effects_digest(*expected_fx_digest)
                            .with_barrier_dependencies(barrier_deps);

                        if let &ExecutionStatus::Failure {
                            error: ExecutionFailureStatus::InsufficientFundsForWithdraw,
                            ..
                        } = effects.status()
                        {
                            env = env.with_insufficient_funds();
                        }

                        Some((*tx_digest, (txn.clone(), env)))
                    }
                },
            ),
        );

        self.execution_scheduler
            .enqueue_transactions(unexecuted_txns, &self.epoch_store);

        unexecuted_tx_digests
    }

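    /// Executes the change-epoch (end-of-epoch) transaction, which is held back until
    /// every other transaction in the checkpoint has been executed, and waits for its
    /// effects.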
    #[instrument(level = "error", skip_all)]
    async fn execute_change_epoch_tx(&self, tx_data: &CheckpointTransactionData) {
        let change_epoch_tx = tx_data.transactions.last().unwrap();
        let change_epoch_fx = tx_data.effects.last().unwrap();
        assert_eq!(
            change_epoch_tx.digest(),
            change_epoch_fx.transaction_digest()
        );
        assert!(
            change_epoch_tx.transaction_data().is_end_of_epoch_tx(),
            "final txn must be an end of epoch txn"
        );

        let assigned_versions = self
            .epoch_store
            .acquire_shared_version_assignments_from_effects(
                change_epoch_tx,
                change_epoch_fx,
                None,
                self.object_cache_reader.as_ref(),
            )
            .expect("Acquiring shared version assignments for change_epoch tx cannot fail");

        info!(
            "scheduling change epoch txn with digest: {:?}, expected effects digest: {:?}, assigned versions: {:?}",
            change_epoch_tx.digest(),
            change_epoch_fx.digest(),
            assigned_versions
        );
        self.execution_scheduler.enqueue_transactions(
            vec![(
                change_epoch_tx.clone(),
                ExecutionEnv::new()
                    .with_assigned_versions(assigned_versions)
                    .with_expected_effects_digest(change_epoch_fx.digest()),
            )],
            &self.epoch_store,
        );

        self.transaction_cache_reader
            .notify_read_executed_effects_digests(
                "CheckpointExecutor::notify_read_advance_epoch_tx",
                &[*change_epoch_tx.digest()],
            )
            .await;
    }

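    /// Advances the highest-executed-checkpoint watermark (which must move forward by
    /// exactly one), prunes full checkpoint contents older than
    /// `NUM_SAVED_FULL_CHECKPOINT_CONTENTS`, and updates the related metrics.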
    #[instrument(level = "debug", skip_all)]
    fn bump_highest_executed_checkpoint(&self, checkpoint: &VerifiedCheckpoint) {
        let seq = *checkpoint.sequence_number();
        debug!("Bumping highest_executed_checkpoint watermark to {seq:?}");
        if let Some(prev_highest) = self
            .checkpoint_store
            .get_highest_executed_checkpoint_seq_number()
            .unwrap()
        {
            assert_eq!(prev_highest + 1, seq);
        } else {
            assert_eq!(seq, 0);
        }
        if seq.is_multiple_of(CHECKPOINT_PROGRESS_LOG_COUNT_INTERVAL) {
            info!("Finished syncing and executing checkpoint {}", seq);
        }

        fail_point!("highest-executed-checkpoint");

        const NUM_SAVED_FULL_CHECKPOINT_CONTENTS: u64 = 5_000;
        if seq >= NUM_SAVED_FULL_CHECKPOINT_CONTENTS {
            let prune_seq = seq - NUM_SAVED_FULL_CHECKPOINT_CONTENTS;
            if let Some(prune_checkpoint) = self
                .checkpoint_store
                .get_checkpoint_by_sequence_number(prune_seq)
                .expect("Failed to fetch checkpoint")
            {
                self.checkpoint_store
                    .delete_full_checkpoint_contents(prune_seq)
                    .expect("Failed to delete full checkpoint contents");
                self.checkpoint_store
                    .delete_contents_digest_sequence_number_mapping(
                        &prune_checkpoint.content_digest,
                    )
                    .expect("Failed to delete contents digest -> sequence number mapping");
            } else {
                debug!(
                    "Failed to fetch checkpoint with sequence number {:?}",
                    prune_seq
                );
            }
        }

        self.checkpoint_store
            .update_highest_executed_checkpoint(checkpoint)
            .unwrap();
        self.metrics.last_executed_checkpoint.set(seq as i64);

        self.metrics
            .last_executed_checkpoint_timestamp_ms
            .set(checkpoint.timestamp_ms as i64);
        checkpoint.report_checkpoint_age(
            &self.metrics.last_executed_checkpoint_age,
            &self.metrics.last_executed_checkpoint_age_ms,
        );
    }

    #[instrument(level = "info", skip_all)]
    async fn commit_index_updates_and_enqueue_to_subscription_service(
        &self,
        checkpoint: Checkpoint,
    ) {
        if let Some(rpc_index) = &self.state.rpc_index {
            rpc_index
                .commit_update_for_checkpoint(checkpoint.summary.sequence_number)
                .expect("failed to update rpc_indexes");
        }

        if let Some(sender) = &self.subscription_service_checkpoint_sender
            && let Err(e) = sender.send(checkpoint).await
        {
            warn!("unable to send checkpoint to subscription service: {e}");
        }
    }

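    /// Returns the randomness rounds committed in this checkpoint. They are normally
    /// carried in the checkpoint's version-specific data; when that is absent, the
    /// randomness state update (if any) is expected to be the checkpoint's first
    /// transaction, so only that transaction is inspected.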
    #[instrument(level = "debug", skip_all)]
    fn extract_randomness_rounds(
        &self,
        checkpoint: &VerifiedCheckpoint,
        checkpoint_contents: &CheckpointContents,
    ) -> Vec<RandomnessRound> {
        if let Some(version_specific_data) = checkpoint
            .version_specific_data(self.epoch_store.protocol_config())
            .expect("unable to get version_specific_data")
        {
            version_specific_data.into_v1().randomness_rounds
        } else {
            assert_eq!(
                0,
                self.epoch_store
                    .protocol_config()
                    .min_checkpoint_interval_ms_as_option()
                    .unwrap_or_default(),
            );
            if let Some(first_digest) = checkpoint_contents.inner().first_digests() {
                let maybe_randomness_tx = self
                    .transaction_cache_reader
                    .get_transaction_block(&first_digest.transaction)
                    .unwrap_or_else(|| {
                        fatal!(
                            "state-sync should have ensured that transaction with digests {first_digest:?} exists for checkpoint: {}",
                            checkpoint.sequence_number()
                        )
                    });
                if let TransactionKind::RandomnessStateUpdate(rsu) =
                    maybe_randomness_tx.data().transaction_data().kind()
                {
                    vec![rsu.randomness_round]
                } else {
                    Vec::new()
                }
            } else {
                Vec::new()
            }
        }
    }
}