// sui_node/lib.rs

1// Copyright (c) Mysten Labs, Inc.
2// SPDX-License-Identifier: Apache-2.0
3
4use anemo::Network;
5use anemo::PeerId;
6use anemo_tower::callback::CallbackLayer;
7use anemo_tower::trace::DefaultMakeSpan;
8use anemo_tower::trace::DefaultOnFailure;
9use anemo_tower::trace::TraceLayer;
10use anyhow::Context;
11use anyhow::Result;
12use anyhow::anyhow;
13use arc_swap::ArcSwap;
14use fastcrypto_zkp::bn254::zk_login::JwkId;
15use fastcrypto_zkp::bn254::zk_login::OIDCProvider;
16use futures::future::BoxFuture;
17use mysten_common::in_test_configuration;
18use prometheus::Registry;
19use std::collections::{BTreeSet, HashMap, HashSet};
20use std::fmt;
21use std::future::Future;
22use std::path::PathBuf;
23use std::str::FromStr;
24#[cfg(msim)]
25use std::sync::atomic::Ordering;
26use std::sync::{Arc, Weak};
27use std::time::Duration;
28use sui_core::admission_queue::{
29    AdmissionQueueContext, AdmissionQueueManager, AdmissionQueueMetrics,
30};
31use sui_core::authority::ExecutionEnv;
32use sui_core::authority::RandomnessRoundReceiver;
33use sui_core::authority::authority_store_tables::AuthorityPerpetualTablesOptions;
34use sui_core::authority::backpressure::BackpressureManager;
35use sui_core::authority::epoch_start_configuration::EpochFlag;
36use sui_core::authority::execution_time_estimator::ExecutionTimeObserver;
37use sui_core::consensus_adapter::ConsensusClient;
38use sui_core::consensus_manager::UpdatableConsensusClient;
39use sui_core::epoch::randomness::RandomnessManager;
40use sui_core::execution_cache::build_execution_cache;
41use sui_network::endpoint_manager::{AddressSource, EndpointId};
42use sui_network::validator::server::SUI_TLS_SERVER_NAME;
43use sui_types::full_checkpoint_content::Checkpoint;
44use sui_types::node_role::NodeRole;
45
46use sui_core::global_state_hasher::GlobalStateHashMetrics;
47use sui_core::storage::RestReadStore;
48use sui_json_rpc::bridge_api::BridgeReadApi;
49use sui_json_rpc_api::JsonRpcMetrics;
50use sui_network::randomness;
51use sui_rpc_api::RpcMetrics;
52use sui_rpc_api::ServerVersion;
53use sui_rpc_api::subscription::SubscriptionService;
54use sui_types::base_types::ConciseableName;
55use sui_types::crypto::RandomnessRound;
56use sui_types::digests::{
57    ChainIdentifier, CheckpointDigest, TransactionDigest, TransactionEffectsDigest,
58};
59use sui_types::messages_consensus::AuthorityCapabilitiesV2;
60use sui_types::sui_system_state::SuiSystemState;
61use tap::tap::TapFallible;
62use tokio::sync::oneshot;
63use tokio::sync::{Mutex, broadcast, mpsc};
64use tokio::task::JoinHandle;
65use tower::ServiceBuilder;
66use tracing::{Instrument, error_span, info};
67use tracing::{debug, error, warn};
68
// Emits JWK-related log lines at `debug!` level under a test configuration and
// at `info!` level otherwise. JWK logs generate significant volume in tests but
// are insignificant (and useful) in production, hence the split.
macro_rules! jwk_log {
    ($($arg:tt)+) => {
        if !in_test_configuration() {
            info!($($arg)+);
        } else {
            debug!($($arg)+);
        }
    };
}
81
82use fastcrypto_zkp::bn254::zk_login::JWK;
83pub use handle::SuiNodeHandle;
84use mysten_metrics::{RegistryService, spawn_monitored_task};
85use mysten_service::server_timing::server_timing_middleware;
86use sui_config::node::{DBCheckpointConfig, RunWithRange};
87use sui_config::node::{ForkCrashBehavior, ForkRecoveryConfig};
88use sui_config::node_config_metrics::NodeConfigMetrics;
89use sui_config::{ConsensusConfig, NodeConfig};
90use sui_core::authority::authority_per_epoch_store::AuthorityPerEpochStore;
91use sui_core::authority::authority_store_tables::AuthorityPerpetualTables;
92use sui_core::authority::epoch_start_configuration::EpochStartConfigTrait;
93use sui_core::authority::epoch_start_configuration::EpochStartConfiguration;
94use sui_core::authority::submitted_transaction_cache::SubmittedTransactionCacheMetrics;
95use sui_core::authority_aggregator::AuthorityAggregator;
96use sui_core::authority_server::{ValidatorService, ValidatorServiceMetrics};
97use sui_core::checkpoints::checkpoint_executor::metrics::CheckpointExecutorMetrics;
98use sui_core::checkpoints::checkpoint_executor::{CheckpointExecutor, StopReason};
99use sui_core::checkpoints::{
100    CheckpointMetrics, CheckpointOutput, CheckpointService, CheckpointStore, LogCheckpointOutput,
101    SendCheckpointToStateSync, SubmitCheckpointToConsensus,
102};
103use sui_core::consensus_adapter::{ConsensusAdapter, ConsensusAdapterMetrics};
104use sui_core::consensus_manager::ConsensusManager;
105use sui_core::consensus_throughput_calculator::ConsensusThroughputCalculator;
106use sui_core::consensus_validator::{SuiTxValidator, SuiTxValidatorMetrics};
107use sui_core::db_checkpoint_handler::DBCheckpointHandler;
108use sui_core::epoch::committee_store::CommitteeStore;
109use sui_core::epoch::consensus_store_pruner::ConsensusStorePruner;
110use sui_core::epoch::epoch_metrics::EpochMetrics;
111use sui_core::epoch::reconfiguration::ReconfigurationInitiator;
112use sui_core::global_state_hasher::GlobalStateHasher;
113use sui_core::jsonrpc_index::IndexStore;
114use sui_core::module_cache_metrics::ResolverMetrics;
115use sui_core::overload_monitor::overload_monitor;
116use sui_core::rpc_index::RpcIndexStore;
117use sui_core::signature_verifier::SignatureVerifierMetrics;
118use sui_core::storage::RocksDbStore;
119use sui_core::transaction_orchestrator::TransactionOrchestrator;
120use sui_core::{
121    authority::{AuthorityState, AuthorityStore},
122    authority_client::NetworkAuthorityClient,
123};
124use sui_json_rpc::JsonRpcServerBuilder;
125use sui_json_rpc::coin_api::CoinReadApi;
126use sui_json_rpc::governance_api::GovernanceReadApi;
127use sui_json_rpc::indexer_api::IndexerApi;
128use sui_json_rpc::move_utils::MoveUtils;
129use sui_json_rpc::read_api::ReadApi;
130use sui_json_rpc::transaction_builder_api::TransactionBuilderApi;
131use sui_json_rpc::transaction_execution_api::TransactionExecutionApi;
132use sui_macros::fail_point;
133use sui_macros::{fail_point_async, replay_log};
134use sui_network::api::ValidatorServer;
135use sui_network::discovery;
136use sui_network::endpoint_manager::EndpointManager;
137use sui_network::state_sync;
138use sui_network::validator::server::ServerBuilder;
139use sui_protocol_config::{Chain, ProtocolConfig, ProtocolVersion};
140use sui_snapshot::uploader::StateSnapshotUploader;
141use sui_storage::{
142    http_key_value_store::HttpKVStore,
143    key_value_store::{FallbackTransactionKVStore, TransactionKeyValueStore},
144    key_value_store_metrics::KeyValueStoreMetrics,
145};
146use sui_types::base_types::{AuthorityName, EpochId};
147use sui_types::committee::Committee;
148use sui_types::crypto::KeypairTraits;
149use sui_types::error::{SuiError, SuiResult};
150use sui_types::messages_consensus::{ConsensusTransaction, check_total_jwk_size};
151use sui_types::sui_system_state::SuiSystemStateTrait;
152use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemState;
153use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait;
154use sui_types::supported_protocol_versions::SupportedProtocolVersions;
155use typed_store::DBMetrics;
156use typed_store::rocks::default_db_options;
157
158use crate::metrics::{GrpcMetrics, SuiNodeMetrics};
159
160pub mod admin;
161mod handle;
162pub mod metrics;
163
/// Components that exist only while the node acts as a validator. Stored on
/// `SuiNode` as `Mutex<Option<ValidatorComponents>>`, so the whole set may be
/// absent (e.g. fullnode mode) and replaced as a unit.
pub struct ValidatorComponents {
    // Validator server handle; `SpawnOnce` suggests deferred startup —
    // NOTE(review): confirm semantics at the `SpawnOnce` definition.
    validator_server_handle: Option<SpawnOnce>,
    // Background task handle for the overload monitor, when one was started.
    validator_overload_monitor_handle: Option<JoinHandle<()>>,
    consensus_manager: Arc<ConsensusManager>,
    // Prunes old consensus storage.
    consensus_store_pruner: ConsensusStorePruner,
    // Used to submit transactions (e.g. fetched JWKs) to consensus.
    consensus_adapter: Arc<ConsensusAdapter>,
    checkpoint_metrics: Arc<CheckpointMetrics>,
    sui_tx_validator_metrics: Arc<SuiTxValidatorMetrics>,
    admission_queue: Option<AdmissionQueueContext>,
}
/// Handles produced by `Self::create_p2p_network`: the anemo network itself
/// plus the services running on top of it (see destructuring in `start_async`).
pub struct P2pComponents {
    // The underlying anemo peer-to-peer network.
    p2p_network: Network,
    // Peers known at network creation time; the String payload's meaning is not
    // visible in this file — presumably a label or address. TODO(review): confirm.
    known_peers: HashMap<PeerId, String>,
    // Handle to the peer discovery service.
    discovery_handle: discovery::Handle,
    // Handle to the state-sync service.
    state_sync_handle: state_sync::Handle,
    // Handle to the randomness service.
    randomness_handle: randomness::Handle,
    // Used to push peer network address updates (see `update_peer_addresses`).
    endpoint_manager: EndpointManager,
}
182
#[cfg(msim)]
mod simulator {
    //! Simulator-only (msim) support: per-node simulation state and a
    //! thread-local hook that lets tests override JWK fetching.

    use std::sync::atomic::AtomicBool;
    use sui_types::error::SuiErrorKind;

    use super::*;

    /// Simulation-related state attached to each `SuiNode` (see the
    /// `sim_state` field on `SuiNode`).
    pub(super) struct SimState {
        // Handle to the simulator node this SuiNode is running on.
        pub sim_node: sui_simulator::runtime::NodeHandle,
        // Whether safe mode is expected; read/written elsewhere (the msim-only
        // `Ordering` import at the top of the file supports atomic access).
        pub sim_safe_mode_expected: AtomicBool,
        // Detects leaked nodes in simulation; held only for its Drop side effects.
        _leak_detector: sui_simulator::NodeLeakDetector,
    }

    impl Default for SimState {
        fn default() -> Self {
            Self {
                sim_node: sui_simulator::runtime::NodeHandle::current(),
                sim_safe_mode_expected: AtomicBool::new(false),
                _leak_detector: sui_simulator::NodeLeakDetector::new(),
            }
        }
    }

    /// Signature of the JWK-fetching hook tests can install via
    /// `set_jwk_injector`.
    type JwkInjector = dyn Fn(AuthorityName, &OIDCProvider) -> SuiResult<Vec<(JwkId, JWK)>>
        + Send
        + Sync
        + 'static;

    /// Default injector: ignores the requested provider and returns the
    /// bundled default Twitch JWKs, mapping parse failures to
    /// `JWKRetrievalError`.
    fn default_fetch_jwks(
        _authority: AuthorityName,
        _provider: &OIDCProvider,
    ) -> SuiResult<Vec<(JwkId, JWK)>> {
        use fastcrypto_zkp::bn254::zk_login::parse_jwks;
        // Just load a default Twitch jwk for testing.
        parse_jwks(
            sui_types::zk_login_util::DEFAULT_JWK_BYTES,
            &OIDCProvider::Twitch,
            true,
        )
        .map_err(|_| SuiErrorKind::JWKRetrievalError.into())
    }

    // Thread-local so each simulated node thread can carry its own injector.
    thread_local! {
        static JWK_INJECTOR: std::cell::RefCell<Arc<JwkInjector>> = std::cell::RefCell::new(Arc::new(default_fetch_jwks));
    }

    /// Returns the currently installed JWK injector for this thread.
    pub(super) fn get_jwk_injector() -> Arc<JwkInjector> {
        JWK_INJECTOR.with(|injector| injector.borrow().clone())
    }

    /// Installs a custom JWK injector for this thread (test hook).
    pub fn set_jwk_injector(injector: Arc<JwkInjector>) {
        JWK_INJECTOR.with(|cell| *cell.borrow_mut() = injector);
    }
}
236
237#[cfg(msim)]
238pub use simulator::set_jwk_injector;
239#[cfg(msim)]
240use simulator::*;
241use sui_core::authority::authority_store_pruner::PrunerWatermarks;
242use sui_core::{
243    consensus_handler::ConsensusHandlerInitializer, safe_client::SafeClientMetricsBase,
244};
245
246const DEFAULT_GRPC_CONNECT_TIMEOUT: Duration = Duration::from_secs(60);
247
/// The top-level node object: owns the authority state, network services, and
/// (when acting as a validator) the consensus-facing components.
pub struct SuiNode {
    // The node's configuration, possibly normalized in `start_async`
    // (e.g. `supported_protocol_versions` defaulted).
    config: NodeConfig,
    // Present only while running as a validator; may be swapped as a unit.
    validator_components: Mutex<Option<ValidatorComponents>>,

    /// The http servers responsible for serving RPC traffic (gRPC and JSON-RPC)
    #[allow(unused)]
    http_servers: HttpServers,

    // Core authority state (stores, caches, execution).
    state: Arc<AuthorityState>,
    transaction_orchestrator: Option<Arc<TransactionOrchestrator<NetworkAuthorityClient>>>,
    // Prometheus registry service; used to create per-component registries.
    registry_service: RegistryService,
    metrics: Arc<SuiNodeMetrics>,
    checkpoint_metrics: Arc<CheckpointMetrics>,

    // Held for their lifetime/Drop side effects (underscore-prefixed: not read).
    _discovery: discovery::Handle,
    _connection_monitor_handle: mysten_network::anemo_connection_monitor::ConnectionMonitorHandle,
    state_sync_handle: state_sync::Handle,
    randomness_handle: randomness::Handle,
    checkpoint_store: Arc<CheckpointStore>,
    // Optional so it can be taken/replaced; see `global_state_hasher` usage
    // elsewhere in the file.
    global_state_hasher: Mutex<Option<Arc<GlobalStateHasher>>>,

    /// Broadcast channel to send the starting system state for the next epoch.
    end_of_epoch_channel: broadcast::Sender<SuiSystemState>,

    /// EndpointManager for updating peer network addresses.
    endpoint_manager: EndpointManager,

    backpressure_manager: Arc<BackpressureManager>,

    // Keeps the db-checkpoint handler alive; dropping the sender signals it.
    _db_checkpoint_handle: Option<tokio::sync::broadcast::Sender<()>>,

    #[cfg(msim)]
    sim_state: SimState,

    // Keeps the state-snapshot uploader alive (see `start_state_snapshot`).
    _state_snapshot_uploader_handle: Option<broadcast::Sender<()>>,
    // Channel to allow signaling upstream to shutdown sui-node
    shutdown_channel_tx: broadcast::Sender<Option<RunWithRange>>,

    /// AuthorityAggregator of the network, created at start and beginning of each epoch.
    /// Use ArcSwap so that we could mutate it without taking mut reference.
    // TODO: Eventually we can make this auth aggregator a shared reference so that this
    // update will automatically propagate to other uses.
    auth_agg: Arc<ArcSwap<AuthorityAggregator<NetworkAuthorityClient>>>,

    // Feeds executed checkpoints to the RPC subscription service, when enabled.
    subscription_service_checkpoint_sender: Option<tokio::sync::mpsc::Sender<Checkpoint>>,
}
294
295impl fmt::Debug for SuiNode {
296    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
297        f.debug_struct("SuiNode")
298            .field("name", &self.state.name.concise())
299            .finish()
300    }
301}
302
303static MAX_JWK_KEYS_PER_FETCH: usize = 100;
304
305impl SuiNode {
306    pub async fn start(
307        config: NodeConfig,
308        registry_service: RegistryService,
309    ) -> Result<Arc<SuiNode>> {
310        Self::start_async(
311            config,
312            registry_service,
313            ServerVersion::new("sui-node", "unknown"),
314        )
315        .await
316    }
317
    /// Spawns one background task per configured zkLogin OIDC provider that
    /// periodically fetches the provider's JWKs and submits new, valid keys to
    /// consensus. Each task is wrapped in `within_alive_epoch`, so it is torn
    /// down when `epoch_store`'s epoch ends.
    fn start_jwk_updater(
        config: &NodeConfig,
        metrics: Arc<SuiNodeMetrics>,
        authority: AuthorityName,
        epoch_store: Arc<AuthorityPerEpochStore>,
        consensus_adapter: Arc<ConsensusAdapter>,
    ) {
        let epoch = epoch_store.epoch();

        // Providers are configured per chain; a chain with no entry yields an
        // empty set (no updater tasks). Invalid provider strings panic.
        let supported_providers = config
            .zklogin_oauth_providers
            .get(&epoch_store.get_chain_identifier().chain())
            .unwrap_or(&BTreeSet::new())
            .iter()
            .map(|s| OIDCProvider::from_str(s).expect("Invalid provider string"))
            .collect::<Vec<_>>();

        let fetch_interval = Duration::from_secs(config.jwk_fetch_interval_seconds);

        info!(
            ?fetch_interval,
            "Starting JWK updater tasks with supported providers: {:?}", supported_providers
        );

        // Returns true iff the key's `iss` maps back to the provider it was
        // fetched from and the key passes the total-size check; bumps the
        // `invalid_jwks` counter (labeled by provider) on every rejection.
        fn validate_jwk(
            metrics: &Arc<SuiNodeMetrics>,
            provider: &OIDCProvider,
            id: &JwkId,
            jwk: &JWK,
        ) -> bool {
            let Ok(iss_provider) = OIDCProvider::from_iss(&id.iss) else {
                warn!(
                    "JWK iss {:?} (retrieved from {:?}) is not a valid provider",
                    id.iss, provider
                );
                metrics
                    .invalid_jwks
                    .with_label_values(&[&provider.to_string()])
                    .inc();
                return false;
            };

            if iss_provider != *provider {
                warn!(
                    "JWK iss {:?} (retrieved from {:?}) does not match provider {:?}",
                    id.iss, provider, iss_provider
                );
                metrics
                    .invalid_jwks
                    .with_label_values(&[&provider.to_string()])
                    .inc();
                return false;
            }

            if !check_total_jwk_size(id, jwk) {
                warn!("JWK {:?} (retrieved from {:?}) is too large", id, provider);
                metrics
                    .invalid_jwks
                    .with_label_values(&[&provider.to_string()])
                    .inc();
                return false;
            }

            true
        }

        // metrics is:
        //  pub struct SuiNodeMetrics {
        //      pub jwk_requests: IntCounterVec,
        //      pub jwk_request_errors: IntCounterVec,
        //      pub total_jwks: IntCounterVec,
        //      pub unique_jwks: IntCounterVec,
        //  }

        // One fetch/submit loop per provider.
        for p in supported_providers.into_iter() {
            let provider_str = p.to_string();
            let epoch_store = epoch_store.clone();
            let consensus_adapter = consensus_adapter.clone();
            let metrics = metrics.clone();
            spawn_monitored_task!(epoch_store.clone().within_alive_epoch(
                async move {
                    // note: restart-safe de-duplication happens after consensus, this is
                    // just best-effort to reduce unneeded submissions.
                    let mut seen = HashSet::new();
                    loop {
                        jwk_log!("fetching JWK for provider {:?}", p);
                        metrics.jwk_requests.with_label_values(&[&provider_str]).inc();
                        match Self::fetch_jwks(authority, &p).await {
                            Err(e) => {
                                metrics.jwk_request_errors.with_label_values(&[&provider_str]).inc();
                                warn!("Error when fetching JWK for provider {:?} {:?}", p, e);
                                // Retry in 30 seconds
                                tokio::time::sleep(Duration::from_secs(30)).await;
                                continue;
                            }
                            Ok(mut keys) => {
                                metrics.total_jwks
                                    .with_label_values(&[&provider_str])
                                    .inc_by(keys.len() as u64);

                                // Keep only keys that (a) validate, (b) are not
                                // already active in the current epoch, and
                                // (c) were not submitted earlier by this task.
                                keys.retain(|(id, jwk)| {
                                    validate_jwk(&metrics, &p, id, jwk) &&
                                    !epoch_store.jwk_active_in_current_epoch(id, jwk) &&
                                    seen.insert((id.clone(), jwk.clone()))
                                });

                                metrics.unique_jwks
                                    .with_label_values(&[&provider_str])
                                    .inc_by(keys.len() as u64);

                                // prevent oauth providers from sending too many keys,
                                // inadvertently or otherwise
                                if keys.len() > MAX_JWK_KEYS_PER_FETCH {
                                    warn!("Provider {:?} sent too many JWKs, only the first {} will be used", p, MAX_JWK_KEYS_PER_FETCH);
                                    keys.truncate(MAX_JWK_KEYS_PER_FETCH);
                                }

                                for (id, jwk) in keys.into_iter() {
                                    jwk_log!("Submitting JWK to consensus: {:?}", id);

                                    // Submission failures are logged and dropped;
                                    // the key will be retried on a later fetch
                                    // (it stays in `seen`, so only if it becomes
                                    // active or the task restarts — best effort).
                                    let txn = ConsensusTransaction::new_jwk_fetched(authority, id, jwk);
                                    consensus_adapter.submit(txn, None, &epoch_store, None, None)
                                        .tap_err(|e| warn!("Error when submitting JWKs to consensus {:?}", e))
                                        .ok();
                                }
                            }
                        }
                        tokio::time::sleep(fetch_interval).await;
                    }
                }
                .instrument(error_span!("jwk_updater_task", epoch)),
            ));
        }
    }
452
453    pub async fn start_async(
454        config: NodeConfig,
455        registry_service: RegistryService,
456        server_version: ServerVersion,
457    ) -> Result<Arc<SuiNode>> {
458        NodeConfigMetrics::new(&registry_service.default_registry()).record_metrics(&config);
459        let mut config = config.clone();
460        if config.supported_protocol_versions.is_none() {
461            info!(
462                "populating config.supported_protocol_versions with default {:?}",
463                SupportedProtocolVersions::SYSTEM_DEFAULT
464            );
465            config.supported_protocol_versions = Some(SupportedProtocolVersions::SYSTEM_DEFAULT);
466        }
467
468        let run_with_range = config.run_with_range;
469        let prometheus_registry = registry_service.default_registry();
470        let node_role = config.intended_node_role();
471
472        info!(node =? config.protocol_public_key(),
473            "Initializing sui-node listening on {} with role {:?}", config.network_address, node_role
474        );
475
476        // Initialize metrics to track db usage before creating any stores
477        DBMetrics::init(registry_service.clone());
478
479        // Initialize db sync-to-disk setting from config (falls back to env var if not set)
480        typed_store::init_write_sync(config.enable_db_sync_to_disk);
481
482        // Initialize Mysten metrics.
483        mysten_metrics::init_metrics(&prometheus_registry);
484        // Unsupported (because of the use of static variable) and unnecessary in simtests.
485        #[cfg(not(msim))]
486        mysten_metrics::thread_stall_monitor::start_thread_stall_monitor();
487
488        let genesis = config.genesis()?.clone();
489
490        let secret = Arc::pin(config.protocol_key_pair().copy());
491        let genesis_committee = genesis.committee();
492        let committee_store = Arc::new(CommitteeStore::new(
493            config.db_path().join("epochs"),
494            &genesis_committee,
495            None,
496        ));
497
498        let pruner_watermarks = Arc::new(PrunerWatermarks::default());
499        let checkpoint_store = CheckpointStore::new(
500            &config.db_path().join("checkpoints"),
501            pruner_watermarks.clone(),
502        );
503        let checkpoint_metrics = CheckpointMetrics::new(&registry_service.default_registry());
504
505        if node_role.runs_consensus() {
506            Self::check_and_recover_forks(
507                &checkpoint_store,
508                &checkpoint_metrics,
509                config.fork_recovery.as_ref(),
510            )
511            .await?;
512        }
513
514        // By default, only enable write stall on nodes that run consensus.
515        let enable_write_stall = config
516            .enable_db_write_stall
517            .unwrap_or(node_role.runs_consensus());
518        let perpetual_tables_options = AuthorityPerpetualTablesOptions {
519            enable_write_stall,
520            is_validator: node_role.is_validator(),
521        };
522        let perpetual_tables = Arc::new(AuthorityPerpetualTables::open(
523            &config.db_store_path(),
524            Some(perpetual_tables_options),
525            Some(pruner_watermarks.epoch_id.clone()),
526        ));
527        let is_genesis = perpetual_tables
528            .database_is_empty()
529            .expect("Database read should not fail at init.");
530
531        let backpressure_manager =
532            BackpressureManager::new_from_checkpoint_store(&checkpoint_store);
533
534        let store =
535            AuthorityStore::open(perpetual_tables, &genesis, &config, &prometheus_registry).await?;
536
537        let cur_epoch = store.get_recovery_epoch_at_restart()?;
538        let committee = committee_store
539            .get_committee(&cur_epoch)?
540            .expect("Committee of the current epoch must exist");
541        let epoch_start_configuration = store
542            .get_epoch_start_configuration()?
543            .expect("EpochStartConfiguration of the current epoch must exist");
544        let cache_metrics = Arc::new(ResolverMetrics::new(&prometheus_registry));
545        let signature_verifier_metrics = SignatureVerifierMetrics::new(&prometheus_registry);
546
547        let cache_traits = build_execution_cache(
548            &config.execution_cache,
549            &prometheus_registry,
550            &store,
551            backpressure_manager.clone(),
552        );
553
554        let auth_agg = {
555            let safe_client_metrics_base = SafeClientMetricsBase::new(&prometheus_registry);
556            Arc::new(ArcSwap::new(Arc::new(
557                AuthorityAggregator::new_from_epoch_start_state(
558                    epoch_start_configuration.epoch_start_state(),
559                    &committee_store,
560                    safe_client_metrics_base,
561                ),
562            )))
563        };
564
565        let chain_id = ChainIdentifier::from(*genesis.checkpoint().digest());
566        let chain = match config.chain_override_for_testing {
567            Some(chain) => chain,
568            None => ChainIdentifier::from(*genesis.checkpoint().digest()).chain(),
569        };
570
571        let highest_executed_checkpoint = checkpoint_store
572            .get_highest_executed_checkpoint_seq_number()
573            .expect("checkpoint store read cannot fail")
574            .unwrap_or(0);
575
576        let previous_epoch_last_checkpoint = if cur_epoch == 0 {
577            0
578        } else {
579            checkpoint_store
580                .get_epoch_last_checkpoint_seq_number(cur_epoch - 1)
581                .expect("checkpoint store read cannot fail")
582                .unwrap_or(highest_executed_checkpoint)
583        };
584
585        let epoch_options = default_db_options().optimize_db_for_write_throughput(4, false);
586        let epoch_store = AuthorityPerEpochStore::new(
587            config.protocol_public_key(),
588            committee.clone(),
589            &config.db_store_path(),
590            Some(epoch_options.options),
591            EpochMetrics::new(&registry_service.default_registry()),
592            epoch_start_configuration,
593            cache_traits.backing_package_store.clone(),
594            cache_traits.object_store.clone(),
595            cache_metrics,
596            signature_verifier_metrics,
597            &config.expensive_safety_check_config,
598            (chain_id, chain),
599            highest_executed_checkpoint,
600            previous_epoch_last_checkpoint,
601            Arc::new(SubmittedTransactionCacheMetrics::new(
602                &registry_service.default_registry(),
603            )),
604            config.fullnode_sync_mode,
605        )?;
606
607        info!("created epoch store");
608
609        replay_log!(
610            "Beginning replay run. Epoch: {:?}, Protocol config: {:?}",
611            epoch_store.epoch(),
612            epoch_store.protocol_config()
613        );
614
615        // the database is empty at genesis time
616        if is_genesis {
617            info!("checking SUI conservation at genesis");
618            // When we are opening the db table, the only time when it's safe to
619            // check SUI conservation is at genesis. Otherwise we may be in the middle of
620            // an epoch and the SUI conservation check will fail. This also initialize
621            // the expected_network_sui_amount table.
622            cache_traits
623                .reconfig_api
624                .expensive_check_sui_conservation(&epoch_store)
625                .expect("SUI conservation check cannot fail at genesis");
626        }
627
628        let effective_buffer_stake = epoch_store.get_effective_buffer_stake_bps();
629        let default_buffer_stake = epoch_store
630            .protocol_config()
631            .buffer_stake_for_protocol_upgrade_bps();
632        if effective_buffer_stake != default_buffer_stake {
633            warn!(
634                ?effective_buffer_stake,
635                ?default_buffer_stake,
636                "buffer_stake_for_protocol_upgrade_bps is currently overridden"
637            );
638        }
639
640        checkpoint_store.insert_genesis_checkpoint(
641            genesis.checkpoint(),
642            genesis.checkpoint_contents().clone(),
643            &epoch_store,
644        );
645
646        info!("creating state sync store");
647        let state_sync_store = RocksDbStore::new(
648            cache_traits.clone(),
649            committee_store.clone(),
650            checkpoint_store.clone(),
651        );
652
653        let index_store =
654            if node_role.should_enable_index_processing() && config.enable_index_processing {
655                info!("creating jsonrpc index store");
656                Some(Arc::new(IndexStore::new(
657                    config.db_path().join("indexes"),
658                    &prometheus_registry,
659                    epoch_store
660                        .protocol_config()
661                        .max_move_identifier_len_as_option(),
662                    config.remove_deprecated_tables,
663                )))
664            } else {
665                None
666            };
667
668        let rpc_index = if node_role.should_enable_index_processing()
669            && config.rpc().is_some_and(|rpc| rpc.enable_indexing())
670        {
671            info!("creating rpc index store");
672            Some(Arc::new(
673                RpcIndexStore::new(
674                    &config.db_path(),
675                    &store,
676                    &checkpoint_store,
677                    &epoch_store,
678                    &cache_traits.backing_package_store,
679                    pruner_watermarks.checkpoint_id.clone(),
680                    config.rpc().cloned().unwrap_or_default(),
681                )
682                .await,
683            ))
684        } else {
685            None
686        };
687
688        let chain_identifier = epoch_store.get_chain_identifier();
689
690        info!("creating archive reader");
691        // Create network
692        let (randomness_tx, randomness_rx) = mpsc::channel(
693            config
694                .p2p_config
695                .randomness
696                .clone()
697                .unwrap_or_default()
698                .mailbox_capacity(),
699        );
700        let P2pComponents {
701            p2p_network,
702            known_peers,
703            discovery_handle,
704            state_sync_handle,
705            randomness_handle,
706            endpoint_manager,
707        } = Self::create_p2p_network(
708            &config,
709            state_sync_store.clone(),
710            chain_identifier,
711            randomness_tx,
712            &prometheus_registry,
713        )?;
714
715        // Inject configured peer address overrides.
716        for peer in &config.p2p_config.peer_address_overrides {
717            endpoint_manager
718                .update_endpoint(
719                    EndpointId::P2p(peer.peer_id),
720                    AddressSource::Config,
721                    peer.addresses.clone(),
722                )
723                .expect("Updating peer address overrides should not fail");
724        }
725
726        // Send initial peer addresses to the p2p network.
727        update_peer_addresses(&config, &endpoint_manager, epoch_store.epoch_start_state());
728
729        info!("start snapshot upload");
730        // Start uploading state snapshot to remote store
731        let state_snapshot_handle = Self::start_state_snapshot(
732            &config,
733            &prometheus_registry,
734            checkpoint_store.clone(),
735            chain_identifier,
736        )?;
737
738        // Start uploading db checkpoints to remote store
739        info!("start db checkpoint");
740        let (db_checkpoint_config, db_checkpoint_handle) = Self::start_db_checkpoint(
741            &config,
742            &prometheus_registry,
743            state_snapshot_handle.is_some(),
744        )?;
745
746        if !epoch_store
747            .protocol_config()
748            .simplified_unwrap_then_delete()
749        {
750            // We cannot prune tombstones if simplified_unwrap_then_delete is not enabled.
751            config
752                .authority_store_pruning_config
753                .set_killswitch_tombstone_pruning(true);
754        }
755
756        let authority_name = config.protocol_public_key();
757
758        info!("create authority state");
759        let state = AuthorityState::new(
760            authority_name,
761            secret,
762            config.supported_protocol_versions.unwrap(),
763            store.clone(),
764            cache_traits.clone(),
765            epoch_store.clone(),
766            committee_store.clone(),
767            index_store.clone(),
768            rpc_index,
769            checkpoint_store.clone(),
770            &prometheus_registry,
771            genesis.objects(),
772            &db_checkpoint_config,
773            config.clone(),
774            chain_identifier,
775            config.policy_config.clone(),
776            config.firewall_config.clone(),
777            pruner_watermarks,
778        )
779        .await;
780        // ensure genesis txn was executed
781        if epoch_store.epoch() == 0 {
782            let txn = &genesis.transaction();
783            let span = error_span!("genesis_txn", tx_digest = ?txn.digest());
784            let transaction =
785                sui_types::executable_transaction::VerifiedExecutableTransaction::new_unchecked(
786                    sui_types::executable_transaction::ExecutableTransaction::new_from_data_and_sig(
787                        genesis.transaction().data().clone(),
788                        sui_types::executable_transaction::CertificateProof::Checkpoint(0, 0),
789                    ),
790                );
791            state
792                .try_execute_immediately(&transaction, ExecutionEnv::new(), &epoch_store)
793                .instrument(span)
794                .await
795                .unwrap();
796        }
797
798        // Start the loop that receives new randomness and generates transactions for it.
799        RandomnessRoundReceiver::spawn(state.clone(), randomness_rx);
800
801        if config
802            .expensive_safety_check_config
803            .enable_secondary_index_checks()
804            && let Some(indexes) = state.indexes.clone()
805        {
806            sui_core::verify_indexes::verify_indexes(
807                state.get_global_state_hash_store().as_ref(),
808                indexes,
809            )
810            .expect("secondary indexes are inconsistent");
811        }
812
813        let (end_of_epoch_channel, end_of_epoch_receiver) =
814            broadcast::channel(config.end_of_epoch_broadcast_channel_capacity);
815
816        let transaction_orchestrator = if node_role.is_fullnode() && run_with_range.is_none() {
817            Some(Arc::new(TransactionOrchestrator::new_with_auth_aggregator(
818                auth_agg.load_full(),
819                state.clone(),
820                end_of_epoch_receiver,
821                &config.db_path(),
822                &prometheus_registry,
823                &config,
824            )))
825        } else {
826            None
827        };
828
829        let (http_servers, subscription_service_checkpoint_sender) = build_http_servers(
830            state.clone(),
831            state_sync_store,
832            &transaction_orchestrator.clone(),
833            &config,
834            &prometheus_registry,
835            server_version,
836            node_role,
837        )
838        .await?;
839
840        let global_state_hasher = Arc::new(GlobalStateHasher::new(
841            cache_traits.global_state_hash_store.clone(),
842            GlobalStateHashMetrics::new(&prometheus_registry),
843        ));
844
845        let network_connection_metrics = mysten_network::quinn_metrics::QuinnConnectionMetrics::new(
846            "sui",
847            &registry_service.default_registry(),
848        );
849
850        let connection_monitor_handle =
851            mysten_network::anemo_connection_monitor::AnemoConnectionMonitor::spawn(
852                p2p_network.downgrade(),
853                Arc::new(network_connection_metrics),
854                known_peers,
855            );
856
857        let sui_node_metrics = Arc::new(SuiNodeMetrics::new(&registry_service.default_registry()));
858
859        sui_node_metrics
860            .binary_max_protocol_version
861            .set(ProtocolVersion::MAX.as_u64() as i64);
862        sui_node_metrics
863            .configured_max_protocol_version
864            .set(config.supported_protocol_versions.unwrap().max.as_u64() as i64);
865
866        let node_role = epoch_store.node_role();
867        let validator_components = if node_role.runs_consensus() {
868            let mut components = Self::construct_validator_components(
869                config.clone(),
870                state.clone(),
871                committee,
872                epoch_store.clone(),
873                checkpoint_store.clone(),
874                state_sync_handle.clone(),
875                randomness_handle.clone(),
876                Arc::downgrade(&global_state_hasher),
877                backpressure_manager.clone(),
878                &registry_service,
879                sui_node_metrics.clone(),
880                checkpoint_metrics.clone(),
881                node_role,
882            )
883            .await?;
884
885            if node_role.is_validator() {
886                components
887                    .consensus_adapter
888                    .recover_end_of_publish(&epoch_store);
889
890                // Start the gRPC server
891                components.validator_server_handle = Some(
892                    components
893                        .validator_server_handle
894                        .take()
895                        .unwrap()
896                        .start()
897                        .await,
898                );
899
900                // Set the consensus address updater so that we can update the consensus peer addresses when requested.
901                endpoint_manager
902                    .set_consensus_address_updater(components.consensus_manager.clone());
903            } else {
904                info!("Starting node as Observer — connecting to configured peers");
905            }
906
907            Some(components)
908        } else {
909            None
910        };
911
912        // setup shutdown channel
913        let (shutdown_channel, _) = broadcast::channel::<Option<RunWithRange>>(1);
914
915        let node = Self {
916            config,
917            validator_components: Mutex::new(validator_components),
918            http_servers,
919            state,
920            transaction_orchestrator,
921            registry_service,
922            metrics: sui_node_metrics,
923            checkpoint_metrics,
924
925            _discovery: discovery_handle,
926            _connection_monitor_handle: connection_monitor_handle,
927            state_sync_handle,
928            randomness_handle,
929            checkpoint_store,
930            global_state_hasher: Mutex::new(Some(global_state_hasher)),
931            end_of_epoch_channel,
932            endpoint_manager,
933            backpressure_manager,
934
935            _db_checkpoint_handle: db_checkpoint_handle,
936
937            #[cfg(msim)]
938            sim_state: Default::default(),
939
940            _state_snapshot_uploader_handle: state_snapshot_handle,
941            shutdown_channel_tx: shutdown_channel,
942
943            auth_agg,
944            subscription_service_checkpoint_sender,
945        };
946
947        info!("SuiNode started!");
948        let node = Arc::new(node);
949        let node_copy = node.clone();
950        spawn_monitored_task!(async move {
951            let result = Self::monitor_reconfiguration(node_copy, epoch_store).await;
952            if let Err(error) = result {
953                warn!("Reconfiguration finished with error {:?}", error);
954            }
955        });
956
957        Ok(node)
958    }
959
960    pub fn subscribe_to_epoch_change(&self) -> broadcast::Receiver<SuiSystemState> {
961        self.end_of_epoch_channel.subscribe()
962    }
963
964    pub fn subscribe_to_shutdown_channel(&self) -> broadcast::Receiver<Option<RunWithRange>> {
965        self.shutdown_channel_tx.subscribe()
966    }
967
968    pub fn current_epoch_for_testing(&self) -> EpochId {
969        self.state.current_epoch_for_testing()
970    }
971
972    pub fn db_checkpoint_path(&self) -> PathBuf {
973        self.config.db_checkpoint_path()
974    }
975
976    // Init reconfig process by starting to reject user certs
977    pub async fn close_epoch(&self, epoch_store: &Arc<AuthorityPerEpochStore>) -> SuiResult {
978        info!("close_epoch (current epoch = {})", epoch_store.epoch());
979        self.validator_components
980            .lock()
981            .await
982            .as_ref()
983            .ok_or_else(|| SuiError::from("Node is not a validator"))?
984            .consensus_adapter
985            .close_epoch(epoch_store);
986        Ok(())
987    }
988
989    pub fn clear_override_protocol_upgrade_buffer_stake(&self, epoch: EpochId) -> SuiResult {
990        self.state
991            .clear_override_protocol_upgrade_buffer_stake(epoch)
992    }
993
994    pub fn set_override_protocol_upgrade_buffer_stake(
995        &self,
996        epoch: EpochId,
997        buffer_stake_bps: u64,
998    ) -> SuiResult {
999        self.state
1000            .set_override_protocol_upgrade_buffer_stake(epoch, buffer_stake_bps)
1001    }
1002
1003    // Testing-only API to start epoch close process.
1004    // For production code, please use the non-testing version.
1005    pub async fn close_epoch_for_testing(&self) -> SuiResult {
1006        let epoch_store = self.state.epoch_store_for_testing();
1007        self.close_epoch(&epoch_store).await
1008    }
1009
1010    fn start_state_snapshot(
1011        config: &NodeConfig,
1012        prometheus_registry: &Registry,
1013        checkpoint_store: Arc<CheckpointStore>,
1014        chain_identifier: ChainIdentifier,
1015    ) -> Result<Option<tokio::sync::broadcast::Sender<()>>> {
1016        if let Some(remote_store_config) = &config.state_snapshot_write_config.object_store_config {
1017            let snapshot_uploader = StateSnapshotUploader::new(
1018                &config.db_checkpoint_path(),
1019                &config.snapshot_path(),
1020                remote_store_config.clone(),
1021                60,
1022                prometheus_registry,
1023                checkpoint_store,
1024                chain_identifier,
1025                config.state_snapshot_write_config.archive_interval_epochs,
1026            )?;
1027            Ok(Some(snapshot_uploader.start()))
1028        } else {
1029            Ok(None)
1030        }
1031    }
1032
1033    fn start_db_checkpoint(
1034        config: &NodeConfig,
1035        prometheus_registry: &Registry,
1036        state_snapshot_enabled: bool,
1037    ) -> Result<(
1038        DBCheckpointConfig,
1039        Option<tokio::sync::broadcast::Sender<()>>,
1040    )> {
1041        let checkpoint_path = Some(
1042            config
1043                .db_checkpoint_config
1044                .checkpoint_path
1045                .clone()
1046                .unwrap_or_else(|| config.db_checkpoint_path()),
1047        );
1048        let db_checkpoint_config = if config.db_checkpoint_config.checkpoint_path.is_none() {
1049            DBCheckpointConfig {
1050                checkpoint_path,
1051                perform_db_checkpoints_at_epoch_end: if state_snapshot_enabled {
1052                    true
1053                } else {
1054                    config
1055                        .db_checkpoint_config
1056                        .perform_db_checkpoints_at_epoch_end
1057                },
1058                ..config.db_checkpoint_config.clone()
1059            }
1060        } else {
1061            config.db_checkpoint_config.clone()
1062        };
1063
1064        match (
1065            db_checkpoint_config.object_store_config.as_ref(),
1066            state_snapshot_enabled,
1067        ) {
1068            // If db checkpoint config object store not specified but
1069            // state snapshot object store is specified, create handler
1070            // anyway for marking db checkpoints as completed so that they
1071            // can be uploaded as state snapshots.
1072            (None, false) => Ok((db_checkpoint_config, None)),
1073            (_, _) => {
1074                let handler = DBCheckpointHandler::new(
1075                    &db_checkpoint_config.checkpoint_path.clone().unwrap(),
1076                    db_checkpoint_config.object_store_config.as_ref(),
1077                    60,
1078                    db_checkpoint_config
1079                        .prune_and_compact_before_upload
1080                        .unwrap_or(true),
1081                    config.authority_store_pruning_config.clone(),
1082                    prometheus_registry,
1083                    state_snapshot_enabled,
1084                )?;
1085                Ok((
1086                    db_checkpoint_config,
1087                    Some(DBCheckpointHandler::start(handler)),
1088                ))
1089            }
1090        }
1091    }
1092
    /// Builds and starts the node's p2p stack: an anemo network hosting the
    /// discovery, state-sync, and randomness services, wrapped in tracing and
    /// metrics middleware. Returns the running pieces bundled in
    /// `P2pComponents`.
    fn create_p2p_network(
        config: &NodeConfig,
        state_sync_store: RocksDbStore,
        chain_identifier: ChainIdentifier,
        randomness_tx: mpsc::Sender<(EpochId, RandomnessRound, Vec<u8>)>,
        prometheus_registry: &Registry,
    ) -> Result<P2pComponents> {
        let mut p2p_config = config.p2p_config.clone();
        {
            // Default the discovery peer-address cache to a file under the
            // node's db directory when no path was configured.
            let disc = p2p_config.discovery.get_or_insert_with(Default::default);
            if disc.peer_addr_store_path.is_none() {
                disc.peer_addr_store_path =
                    Some(config.db_path().join("discovery_peer_cache.yaml"));
            }
        }
        let mut discovery_builder = discovery::Builder::new().config(p2p_config.clone());
        if let Some(consensus_config) = &config.consensus_config {
            // Advertise a consensus endpoint through discovery, preferring the
            // configured external address over the listen address.
            let effective_addr = consensus_config
                .external_address
                .as_ref()
                .or(consensus_config.listen_address.as_ref());
            if let Some(addr) = effective_addr {
                discovery_builder = discovery_builder.consensus_external_address(addr.clone());
            }
        }
        let (discovery, discovery_server, endpoint_manager) = discovery_builder.build();
        let discovery_sender = discovery.sender();

        // State sync serves checkpoints from the rocksdb store and feeds newly
        // learned peer info through the discovery sender.
        let (state_sync, state_sync_router) = state_sync::Builder::new()
            .config(config.p2p_config.state_sync.clone().unwrap_or_default())
            .store(state_sync_store)
            .archive_config(config.archive_reader_config())
            .discovery_sender(discovery_sender)
            .with_metrics(prometheus_registry)
            .build();

        // Label statically configured peers by origin (allowlisted vs. seed);
        // this map is later handed to the connection monitor for reporting.
        let discovery_config = config.p2p_config.discovery.clone().unwrap_or_default();
        let known_peers: HashMap<PeerId, String> = discovery_config
            .allowlisted_peers
            .clone()
            .into_iter()
            .map(|ap| (ap.peer_id, "allowlisted_peer".to_string()))
            .chain(config.p2p_config.seed_peers.iter().filter_map(|peer| {
                peer.peer_id
                    .map(|peer_id| (peer_id, "seed_peer".to_string()))
            }))
            .collect();

        // Randomness service: received rounds are forwarded over randomness_tx.
        let (randomness, randomness_router) =
            randomness::Builder::new(config.protocol_public_key(), randomness_tx)
                .config(config.p2p_config.randomness.clone().unwrap_or_default())
                .with_metrics(prometheus_registry)
                .build();

        let p2p_network = {
            // All three services share a single anemo router.
            let routes = anemo::Router::new()
                .add_rpc_service(discovery_server)
                .merge(state_sync_router);
            let routes = routes.merge(randomness_router);

            let inbound_network_metrics =
                mysten_network::metrics::NetworkMetrics::new("sui", "inbound", prometheus_registry);
            let outbound_network_metrics = mysten_network::metrics::NetworkMetrics::new(
                "sui",
                "outbound",
                prometheus_registry,
            );

            // Inbound middleware: trace server errors and record per-request
            // metrics (flagging excessively large messages).
            let service = ServiceBuilder::new()
                .layer(
                    TraceLayer::new_for_server_errors()
                        .make_span_with(DefaultMakeSpan::new().level(tracing::Level::INFO))
                        .on_failure(DefaultOnFailure::new().level(tracing::Level::WARN)),
                )
                .layer(CallbackLayer::new(
                    mysten_network::metrics::MetricsMakeCallbackHandler::new(
                        Arc::new(inbound_network_metrics),
                        config.p2p_config.excessive_message_size(),
                    ),
                ))
                .service(routes);

            // Outbound middleware mirrors the inbound stack for requests this
            // node originates.
            let outbound_layer = ServiceBuilder::new()
                .layer(
                    TraceLayer::new_for_client_and_server_errors()
                        .make_span_with(DefaultMakeSpan::new().level(tracing::Level::INFO))
                        .on_failure(DefaultOnFailure::new().level(tracing::Level::WARN)),
                )
                .layer(CallbackLayer::new(
                    mysten_network::metrics::MetricsMakeCallbackHandler::new(
                        Arc::new(outbound_network_metrics),
                        config.p2p_config.excessive_message_size(),
                    ),
                ))
                .into_inner();

            let mut anemo_config = config.p2p_config.anemo_config.clone().unwrap_or_default();
            // Inbound requests on this network are small (signatures, queries, summaries).
            // Cap request frames at 1 MiB.
            anemo_config.max_request_frame_size = Some(1 << 20);
            // Responses can be larger (checkpoint contents).
            // Cap response frames at 128 MiB.
            anemo_config.max_response_frame_size = Some(128 << 20);

            // Set a higher default value for socket send/receive buffers if not already
            // configured.
            let mut quic_config = anemo_config.quic.unwrap_or_default();
            if quic_config.socket_send_buffer_size.is_none() {
                quic_config.socket_send_buffer_size = Some(20 << 20);
            }
            if quic_config.socket_receive_buffer_size.is_none() {
                quic_config.socket_receive_buffer_size = Some(20 << 20);
            }
            // Some platforms reject the large buffer sizes; tolerate that
            // rather than failing startup.
            quic_config.allow_failed_socket_buffer_size_setting = true;

            // Set high-performance defaults for quinn transport.
            // With 200MiB buffer size and ~500ms RTT, max throughput ~400MiB/s.
            if quic_config.max_concurrent_bidi_streams.is_none() {
                quic_config.max_concurrent_bidi_streams = Some(500);
            }
            if quic_config.max_concurrent_uni_streams.is_none() {
                quic_config.max_concurrent_uni_streams = Some(500);
            }
            if quic_config.stream_receive_window.is_none() {
                quic_config.stream_receive_window = Some(100 << 20);
            }
            if quic_config.receive_window.is_none() {
                quic_config.receive_window = Some(200 << 20);
            }
            if quic_config.send_window.is_none() {
                quic_config.send_window = Some(200 << 20);
            }
            if quic_config.crypto_buffer_size.is_none() {
                quic_config.crypto_buffer_size = Some(1 << 20);
            }
            if quic_config.max_idle_timeout_ms.is_none() {
                quic_config.max_idle_timeout_ms = Some(10_000);
            }
            if quic_config.keep_alive_interval_ms.is_none() {
                quic_config.keep_alive_interval_ms = Some(5_000);
            }
            anemo_config.quic = Some(quic_config);

            // The server name embeds the chain identifier.
            // NOTE(review): presumably this prevents nodes on different chains
            // from completing a handshake — confirm against anemo's server_name
            // semantics.
            let server_name = format!("sui-{}", chain_identifier);
            let network = Network::bind(config.p2p_config.listen_address)
                .server_name(&server_name)
                .private_key(config.network_key_pair().copy().private().0.to_bytes())
                .config(anemo_config)
                .outbound_request_layer(outbound_layer)
                .start(service)?;
            info!(
                server_name = server_name,
                "P2p network started on {}",
                network.local_addr()
            );

            network
        };

        // Start each service's event loop against the running network.
        let discovery_handle =
            discovery.start(p2p_network.clone(), config.network_key_pair().copy());
        let state_sync_handle = state_sync.start(p2p_network.clone());
        let randomness_handle = randomness.start(p2p_network.clone());

        Ok(P2pComponents {
            p2p_network,
            known_peers,
            discovery_handle,
            state_sync_handle,
            randomness_handle,
            endpoint_manager,
        })
    }
1266
    /// Constructs the consensus-related components for a consensus-running
    /// node: consensus adapter and manager, consensus store pruner, the
    /// validator gRPC service (validators only), and the overload monitor,
    /// then delegates to `start_epoch_specific_validator_components` for the
    /// per-epoch wiring.
    async fn construct_validator_components(
        config: NodeConfig,
        state: Arc<AuthorityState>,
        committee: Arc<Committee>,
        epoch_store: Arc<AuthorityPerEpochStore>,
        checkpoint_store: Arc<CheckpointStore>,
        state_sync_handle: state_sync::Handle,
        randomness_handle: randomness::Handle,
        global_state_hasher: Weak<GlobalStateHasher>,
        backpressure_manager: Arc<BackpressureManager>,
        registry_service: &RegistryService,
        sui_node_metrics: Arc<SuiNodeMetrics>,
        checkpoint_metrics: Arc<CheckpointMetrics>,
        node_role: NodeRole,
    ) -> Result<ValidatorComponents> {
        // Clone so the consensus config can be borrowed mutably while `config`
        // itself stays usable by shared reference below.
        // NOTE(review): it is not visible here which callee needs the mutable
        // borrow — confirm `as_mut` is actually required.
        let mut config_clone = config.clone();
        let consensus_config = config_clone
            .consensus_config
            .as_mut()
            .ok_or_else(|| anyhow!("Node is missing consensus config"))?;

        let client = Arc::new(UpdatableConsensusClient::new());
        // Shared between the consensus adapter and the gRPC service so waiters
        // can be notified when an in-flight submission slot frees up.
        let inflight_slot_freed_notify = Arc::new(tokio::sync::Notify::new());
        let consensus_adapter = Arc::new(Self::construct_consensus_adapter(
            &committee,
            consensus_config,
            state.name,
            &registry_service.default_registry(),
            client.clone(),
            checkpoint_store.clone(),
            inflight_slot_freed_notify.clone(),
        ));

        let consensus_manager = Arc::new(ConsensusManager::new(
            &config,
            consensus_config,
            registry_service,
            client,
            node_role,
        ));

        // This only gets started up once, not on every epoch. (Make call to remove every epoch.)
        let consensus_store_pruner = ConsensusStorePruner::new(
            consensus_manager.get_storage_base_path(),
            consensus_config.db_retention_epochs(),
            consensus_config.db_pruner_period(),
            &registry_service.default_registry(),
        );

        let sui_tx_validator_metrics =
            SuiTxValidatorMetrics::new(&registry_service.default_registry());

        // Only validators serve the transaction-submission gRPC endpoint; the
        // admission queue context comes back alongside the server handle.
        let (validator_server_handle, admission_queue) = if node_role.is_validator() {
            let (handle, queue) = Self::start_grpc_validator_service(
                &config,
                state.clone(),
                consensus_adapter.clone(),
                epoch_store.clone(),
                &registry_service.default_registry(),
                inflight_slot_freed_notify,
            )
            .await?;
            (Some(handle), queue)
        } else {
            (None, None)
        };

        // Starts an overload monitor that monitors the execution of the authority.
        // Don't start the overload monitor when max_load_shedding_percentage is 0.
        let validator_overload_monitor_handle = if node_role.is_validator()
            && config
                .authority_overload_config
                .max_load_shedding_percentage
                > 0
        {
            // Weak ref so the monitor does not keep the authority state alive.
            let authority_state = Arc::downgrade(&state);
            let overload_config = config.authority_overload_config.clone();
            fail_point!("starting_overload_monitor");
            Some(spawn_monitored_task!(overload_monitor(
                authority_state,
                overload_config,
            )))
        } else {
            None
        };

        // Hand all constructed pieces to the per-epoch setup, which assembles
        // the final ValidatorComponents.
        Self::start_epoch_specific_validator_components(
            &config,
            state.clone(),
            consensus_adapter,
            checkpoint_store,
            epoch_store,
            state_sync_handle,
            randomness_handle,
            consensus_manager,
            consensus_store_pruner,
            global_state_hasher,
            backpressure_manager,
            validator_server_handle,
            validator_overload_monitor_handle,
            checkpoint_metrics,
            sui_node_metrics,
            sui_tx_validator_metrics,
            admission_queue,
            node_role,
        )
        .await
    }
1375
    /// Performs the per-epoch wiring for a consensus-running node: builds the
    /// checkpoint service, installs the randomness manager and execution-time
    /// observer where applicable, starts consensus asynchronously, spawns the
    /// checkpoint service, and assembles the final `ValidatorComponents`.
    async fn start_epoch_specific_validator_components(
        config: &NodeConfig,
        state: Arc<AuthorityState>,
        consensus_adapter: Arc<ConsensusAdapter>,
        checkpoint_store: Arc<CheckpointStore>,
        epoch_store: Arc<AuthorityPerEpochStore>,
        state_sync_handle: state_sync::Handle,
        randomness_handle: randomness::Handle,
        consensus_manager: Arc<ConsensusManager>,
        consensus_store_pruner: ConsensusStorePruner,
        state_hasher: Weak<GlobalStateHasher>,
        backpressure_manager: Arc<BackpressureManager>,
        validator_server_handle: Option<SpawnOnce>,
        validator_overload_monitor_handle: Option<JoinHandle<()>>,
        checkpoint_metrics: Arc<CheckpointMetrics>,
        sui_node_metrics: Arc<SuiNodeMetrics>,
        sui_tx_validator_metrics: Arc<SuiTxValidatorMetrics>,
        admission_queue: Option<AdmissionQueueContext>,
        node_role: NodeRole,
    ) -> Result<ValidatorComponents> {
        let checkpoint_service = Self::build_checkpoint_service(
            config,
            consensus_adapter.clone(),
            checkpoint_store.clone(),
            epoch_store.clone(),
            state.clone(),
            state_sync_handle,
            state_hasher,
            checkpoint_metrics.clone(),
            node_role,
        );

        // Install the randomness manager on the epoch store when the protocol
        // has randomness enabled. `try_new` may yield None; in that case no
        // manager is installed for this epoch.
        if node_role.runs_consensus() && epoch_store.randomness_state_enabled() {
            let randomness_manager = RandomnessManager::try_new(
                Arc::downgrade(&epoch_store),
                Box::new(consensus_adapter.clone()),
                randomness_handle,
                config.protocol_key_pair(),
            )
            .await;
            if let Some(randomness_manager) = randomness_manager {
                epoch_store
                    .set_randomness_manager(randomness_manager)
                    .await?;
            }
        }

        // Validators observe execution times and share estimates via consensus.
        if node_role.is_validator() {
            ExecutionTimeObserver::spawn(
                epoch_store.clone(),
                Box::new(consensus_adapter.clone()),
                config
                    .execution_time_observer_config
                    .clone()
                    .unwrap_or_default(),
            );
        }

        let throughput_calculator = Arc::new(ConsensusThroughputCalculator::new(
            None,
            state.metrics.clone(),
        ));

        let consensus_handler_initializer = ConsensusHandlerInitializer::new(
            state.clone(),
            checkpoint_service.clone(),
            epoch_store.clone(),
            consensus_adapter.clone(),
            throughput_calculator,
            backpressure_manager,
            config.congestion_log.clone(),
        );

        info!("Starting consensus manager asynchronously");

        // Spawn consensus startup asynchronously to avoid blocking other components
        tokio::spawn({
            let config = config.clone();
            let epoch_store = epoch_store.clone();
            let sui_tx_validator = SuiTxValidator::new(
                state.clone(),
                epoch_store.clone(),
                checkpoint_service.clone(),
                sui_tx_validator_metrics.clone(),
            );
            let consensus_manager = consensus_manager.clone();
            async move {
                consensus_manager
                    .start(
                        &config,
                        epoch_store,
                        consensus_handler_initializer,
                        sui_tx_validator,
                    )
                    .await;
            }
        });
        // Obtained before spawning the checkpoint service so it can wait for
        // consensus replay to finish.
        let replay_waiter = consensus_manager.replay_waiter();

        info!("Spawning checkpoint service");
        // Escape hatch: DISABLE_REPLAY_WAITER lets the checkpoint service start
        // without waiting for consensus replay.
        let replay_waiter = if std::env::var("DISABLE_REPLAY_WAITER").is_ok() {
            None
        } else {
            Some(replay_waiter)
        };
        checkpoint_service
            .spawn(epoch_store.clone(), replay_waiter)
            .await;

        // Validators keep authenticator state (JWKs for zklogin) fresh via the
        // JWK updater when the feature is enabled.
        if node_role.is_validator() && epoch_store.authenticator_state_enabled() {
            Self::start_jwk_updater(
                config,
                sui_node_metrics,
                state.name,
                epoch_store.clone(),
                consensus_adapter.clone(),
            );
        }

        // Point the admission queue at the new epoch's store.
        if let Some(ctx) = &admission_queue {
            ctx.rotate_for_epoch(epoch_store);
        }

        Ok(ValidatorComponents {
            validator_server_handle,
            validator_overload_monitor_handle,
            consensus_manager,
            consensus_store_pruner,
            consensus_adapter,
            checkpoint_metrics,
            sui_tx_validator_metrics,
            admission_queue,
        })
    }
1510
1511    fn build_checkpoint_service(
1512        config: &NodeConfig,
1513        consensus_adapter: Arc<ConsensusAdapter>,
1514        checkpoint_store: Arc<CheckpointStore>,
1515        epoch_store: Arc<AuthorityPerEpochStore>,
1516        state: Arc<AuthorityState>,
1517        state_sync_handle: state_sync::Handle,
1518        state_hasher: Weak<GlobalStateHasher>,
1519        checkpoint_metrics: Arc<CheckpointMetrics>,
1520        node_role: NodeRole,
1521    ) -> Arc<CheckpointService> {
1522        let epoch_start_timestamp_ms = epoch_store.epoch_start_state().epoch_start_timestamp_ms();
1523        let epoch_duration_ms = epoch_store.epoch_start_state().epoch_duration_ms();
1524
1525        debug!(
1526            "Starting checkpoint service with epoch start timestamp {}
1527            and epoch duration {}",
1528            epoch_start_timestamp_ms, epoch_duration_ms
1529        );
1530
1531        let checkpoint_output: Box<dyn CheckpointOutput> = if node_role.is_validator() {
1532            Box::new(SubmitCheckpointToConsensus {
1533                sender: consensus_adapter,
1534                signer: state.secret.clone(),
1535                authority: config.protocol_public_key(),
1536                next_reconfiguration_timestamp_ms: epoch_start_timestamp_ms
1537                    .checked_add(epoch_duration_ms)
1538                    .expect("Overflow calculating next_reconfiguration_timestamp_ms"),
1539                metrics: checkpoint_metrics.clone(),
1540            })
1541        } else {
1542            LogCheckpointOutput::boxed()
1543        };
1544
1545        let certified_checkpoint_output = SendCheckpointToStateSync::new(state_sync_handle);
1546        let max_tx_per_checkpoint = max_tx_per_checkpoint(epoch_store.protocol_config());
1547        let max_checkpoint_size_bytes =
1548            epoch_store.protocol_config().max_checkpoint_size_bytes() as usize;
1549
1550        CheckpointService::build(
1551            state.clone(),
1552            checkpoint_store,
1553            epoch_store,
1554            state.get_transaction_cache_reader().clone(),
1555            state_hasher,
1556            checkpoint_output,
1557            Box::new(certified_checkpoint_output),
1558            checkpoint_metrics,
1559            max_tx_per_checkpoint,
1560            max_checkpoint_size_bytes,
1561        )
1562    }
1563
1564    fn construct_consensus_adapter(
1565        committee: &Committee,
1566        consensus_config: &ConsensusConfig,
1567        authority: AuthorityName,
1568        prometheus_registry: &Registry,
1569        consensus_client: Arc<dyn ConsensusClient>,
1570        checkpoint_store: Arc<CheckpointStore>,
1571        inflight_slot_freed_notify: Arc<tokio::sync::Notify>,
1572    ) -> ConsensusAdapter {
1573        let ca_metrics = ConsensusAdapterMetrics::new(prometheus_registry);
1574        // The consensus adapter allows the authority to send user certificates through consensus.
1575
1576        ConsensusAdapter::new(
1577            consensus_client,
1578            checkpoint_store,
1579            authority,
1580            consensus_config.max_pending_transactions(),
1581            consensus_config.max_pending_transactions() * 2 / committee.num_members(),
1582            ca_metrics,
1583            inflight_slot_freed_notify,
1584        )
1585    }
1586
    /// Builds the validator gRPC service and returns a `SpawnOnce` that binds
    /// and serves it when started, plus the optional admission-queue context
    /// (present only when `admission_queue_enabled` is set in the node's
    /// authority overload config).
    async fn start_grpc_validator_service(
        config: &NodeConfig,
        state: Arc<AuthorityState>,
        consensus_adapter: Arc<ConsensusAdapter>,
        epoch_store: Arc<AuthorityPerEpochStore>,
        prometheus_registry: &Registry,
        inflight_slot_freed_notify: Arc<tokio::sync::Notify>,
    ) -> Result<(SpawnOnce, Option<AdmissionQueueContext>)> {
        let overload_config = &config.authority_overload_config;
        // Optionally stand up the admission queue; its capacity/bypass
        // fractions and failover timeout come from the overload config. The
        // context is created before the service so it can be handed to it.
        let admission_queue = overload_config.admission_queue_enabled.then(|| {
            let manager = Arc::new(AdmissionQueueManager::new(
                consensus_adapter.clone(),
                Arc::new(AdmissionQueueMetrics::new(prometheus_registry)),
                overload_config.admission_queue_capacity_fraction,
                overload_config.admission_queue_bypass_fraction,
                overload_config.admission_queue_failover_timeout,
                inflight_slot_freed_notify,
            ));
            AdmissionQueueContext::spawn(manager, epoch_store)
        });
        let validator_service = ValidatorService::new(
            state.clone(),
            consensus_adapter,
            Arc::new(ValidatorServiceMetrics::new(prometheus_registry)),
            config.policy_config.clone().map(|p| p.client_id_source),
            admission_queue.clone(),
        );

        // gRPC server tuning: the same timeout constant is reused for connect
        // timeout and HTTP/2 keepalive interval/timeout.
        let mut server_conf = mysten_network::config::Config::new();
        server_conf.connect_timeout = Some(DEFAULT_GRPC_CONNECT_TIMEOUT);
        server_conf.http2_keepalive_interval = Some(DEFAULT_GRPC_CONNECT_TIMEOUT);
        server_conf.http2_keepalive_timeout = Some(DEFAULT_GRPC_CONNECT_TIMEOUT);
        server_conf.global_concurrency_limit = config.grpc_concurrency_limit;
        server_conf.load_shed = config.grpc_load_shed;
        let mut server_builder =
            ServerBuilder::from_config(&server_conf, GrpcMetrics::new(prometheus_registry));

        server_builder = server_builder.add_service(ValidatorServer::new(validator_service));

        // TLS uses the node's network key pair under the well-known Sui server name.
        let tls_config = sui_tls::create_rustls_server_config(
            config.network_key_pair().copy().private(),
            SUI_TLS_SERVER_NAME.to_string(),
        );

        let network_address = config.network_address().clone();

        // `ready_rx` lets the eventual starter of this SpawnOnce wait until
        // the listener is actually bound.
        let (ready_tx, ready_rx) = oneshot::channel();

        let spawn_once = SpawnOnce::new(ready_rx, async move {
            let server = server_builder
                .bind(&network_address, Some(tls_config))
                .await
                .unwrap_or_else(|err| panic!("Failed to bind to {network_address}: {err}"));
            let local_addr = server.local_addr();
            info!("Listening to traffic on {local_addr}");
            // Signal readiness only after a successful bind.
            ready_tx.send(()).unwrap();
            if let Err(err) = server.serve().await {
                info!("Server stopped: {err}");
            }
            info!("Server stopped");
        });
        Ok((spawn_once, admission_queue))
    }
1650
1651    pub fn state(&self) -> Arc<AuthorityState> {
1652        self.state.clone()
1653    }
1654
1655    pub fn node_role(&self) -> NodeRole {
1656        self.state.load_epoch_store_one_call_per_task().node_role()
1657    }
1658
    // Only used for testing because of how epoch store is loaded.
    /// Test-only helper: delegates to `AuthorityState` to read the current
    /// reference gas price.
    pub fn reference_gas_price_for_testing(&self) -> Result<u64, anyhow::Error> {
        self.state.reference_gas_price_for_testing()
    }
1663
    /// Returns a cloned `Arc` handle to the committee store.
    pub fn clone_committee_store(&self) -> Arc<CommitteeStore> {
        self.state.committee_store().clone()
    }
1667
1668    /*
1669    pub fn clone_authority_store(&self) -> Arc<AuthorityStore> {
1670        self.state.db()
1671    }
1672    */
1673
1674    /// Clone an AuthorityAggregator currently used in this node, if the node is a fullnode.
1675    /// After reconfig, Transaction Driver builds a new AuthorityAggregator. The caller
1676    /// of this function will mostly likely want to call this again
1677    /// to get a fresh one.
1678    pub fn clone_authority_aggregator(
1679        &self,
1680    ) -> Option<Arc<AuthorityAggregator<NetworkAuthorityClient>>> {
1681        self.transaction_orchestrator
1682            .as_ref()
1683            .map(|to| to.clone_authority_aggregator())
1684    }
1685
1686    pub fn transaction_orchestrator(
1687        &self,
1688    ) -> Option<Arc<TransactionOrchestrator<NetworkAuthorityClient>>> {
1689        self.transaction_orchestrator.clone()
1690    }
1691
    /// This function awaits the completion of checkpoint execution of the current epoch,
    /// after which it initiates reconfiguration of the entire system.
    ///
    /// Loops once per epoch forever; it only returns when a `run_with_range`
    /// stop condition is hit, in which case the node is shut down first.
    pub async fn monitor_reconfiguration(
        self: Arc<Self>,
        mut epoch_store: Arc<AuthorityPerEpochStore>,
    ) -> Result<()> {
        let checkpoint_executor_metrics =
            CheckpointExecutorMetrics::new(&self.registry_service.default_registry());

        loop {
            // Take sole ownership of the global state hasher for this epoch; a
            // fresh hasher is swapped back into the guard during the
            // reconfiguration branch below.
            let mut hasher_guard = self.global_state_hasher.lock().await;
            let hasher = hasher_guard.take().unwrap();
            info!(
                "Creating checkpoint executor for epoch {}",
                epoch_store.epoch()
            );
            let checkpoint_executor = CheckpointExecutor::new(
                epoch_store.clone(),
                self.checkpoint_store.clone(),
                self.state.clone(),
                hasher.clone(),
                self.backpressure_manager.clone(),
                self.config.checkpoint_executor_config.clone(),
                checkpoint_executor_metrics.clone(),
                self.subscription_service_checkpoint_sender.clone(),
            );

            let run_with_range = self.config.run_with_range;

            let cur_epoch_store = self.state.load_epoch_store_one_call_per_task();

            // Update the current protocol version metric.
            self.metrics
                .current_protocol_version
                .set(cur_epoch_store.protocol_config().version.as_u64() as i64);

            // Advertise capabilities to committee, if we are a validator.
            // FullNodes that state sync via consensus will also have validator components, but they are not supposed to submit any capabilities.
            if let Some(components) = &*self.validator_components.lock().await
                && cur_epoch_store.is_validator()
            {
                // TODO: without this sleep, the consensus message is not delivered reliably.
                tokio::time::sleep(Duration::from_millis(1)).await;

                let config = cur_epoch_store.protocol_config();
                let mut supported_protocol_versions = self
                    .config
                    .supported_protocol_versions
                    .expect("Supported versions should be populated")
                    // no need to send digests of versions less than the current version
                    .truncate_below(config.version);

                // Walk the advertised max version downward past any version
                // whose local prerequisites are not met (currently: the
                // accumulator root object must exist before a version that
                // enables accumulators can be advertised).
                while supported_protocol_versions.max > config.version {
                    let proposed_protocol_config = ProtocolConfig::get_for_version(
                        supported_protocol_versions.max,
                        cur_epoch_store.get_chain(),
                    );

                    if proposed_protocol_config.enable_accumulators()
                        && !epoch_store.accumulator_root_exists()
                    {
                        error!(
                            "cannot upgrade to protocol version {:?} because accumulator root does not exist",
                            supported_protocol_versions.max
                        );
                        supported_protocol_versions.max = supported_protocol_versions.max.prev();
                    } else {
                        break;
                    }
                }

                let binary_config = config.binary_config(None);
                let transaction = ConsensusTransaction::new_capability_notification_v2(
                    AuthorityCapabilitiesV2::new(
                        self.state.name,
                        cur_epoch_store.get_chain_identifier().chain(),
                        supported_protocol_versions,
                        self.state
                            .get_available_system_packages(&binary_config)
                            .await,
                    ),
                );
                info!(?transaction, "submitting capabilities to consensus");
                components.consensus_adapter.submit(
                    transaction,
                    None,
                    &cur_epoch_store,
                    None,
                    None,
                )?;
            }

            // Run checkpoint execution for the whole epoch; returns the reason
            // execution stopped.
            let stop_condition = checkpoint_executor.run_epoch(run_with_range).await;

            if stop_condition == StopReason::RunWithRangeCondition {
                SuiNode::shutdown(&self).await;
                self.shutdown_channel_tx
                    .send(run_with_range)
                    .expect("RunWithRangeCondition met but failed to send shutdown message");
                return Ok(());
            }

            // Safe to call because we are in the middle of reconfiguration.
            let latest_system_state = self
                .state
                .get_object_cache_reader()
                .get_sui_system_state_object_unsafe()
                .expect("Read Sui System State object cannot fail");

            #[cfg(msim)]
            if !self
                .sim_state
                .sim_safe_mode_expected
                .load(Ordering::Relaxed)
            {
                debug_assert!(!latest_system_state.safe_mode());
            }

            #[cfg(not(msim))]
            debug_assert!(!latest_system_state.safe_mode());

            // Best-effort broadcast of the end-of-epoch system state; a send
            // failure is only worth a warning on fullnodes.
            if let Err(err) = self.end_of_epoch_channel.send(latest_system_state.clone())
                && self.state.is_fullnode(&cur_epoch_store)
            {
                warn!(
                    "Failed to send end of epoch notification to subscriber: {:?}",
                    err
                );
            }

            cur_epoch_store.record_is_safe_mode_metric(latest_system_state.safe_mode());
            let new_epoch_start_state = latest_system_state.into_epoch_start_state();

            // Rebuild the authority aggregator for the new epoch's committee.
            self.auth_agg.store(Arc::new(
                self.auth_agg
                    .load()
                    .recreate_with_new_epoch_start_state(&new_epoch_start_state),
            ));

            let next_epoch_committee = new_epoch_start_state.get_sui_committee();
            let next_epoch = next_epoch_committee.epoch();
            assert_eq!(cur_epoch_store.epoch() + 1, next_epoch);

            info!(
                next_epoch,
                "Finished executing all checkpoints in epoch. About to reconfigure the system."
            );

            fail_point_async!("reconfig_delay");

            cur_epoch_store.record_epoch_reconfig_start_time_metric();

            update_peer_addresses(&self.config, &self.endpoint_manager, &new_epoch_start_state);

            let mut validator_components_lock_guard = self.validator_components.lock().await;

            // The following code handles 4 different cases, depending on whether the node
            // was a validator in the previous epoch, and whether the node is a validator
            // in the new epoch.
            let new_epoch_store = self
                .reconfigure_state(
                    &self.state,
                    &cur_epoch_store,
                    next_epoch_committee.clone(),
                    new_epoch_start_state,
                    hasher.clone(),
                )
                .await;

            let new_role = new_epoch_store.node_role();

            // Case split 1: the node was running validator components last epoch.
            let new_validator_components = if let Some(ValidatorComponents {
                validator_server_handle,
                validator_overload_monitor_handle,
                consensus_manager,
                consensus_store_pruner,
                consensus_adapter,
                checkpoint_metrics,
                sui_tx_validator_metrics,
                admission_queue,
            }) = validator_components_lock_guard.take()
            {
                info!("Reconfiguring node (was running consensus).");

                consensus_manager.shutdown().await;
                info!("Consensus has shut down.");

                info!("Epoch store finished reconfiguration.");

                // No other components should be holding a strong reference to state hasher
                // at this point. Confirm here before we swap in the new hasher.
                let global_state_hasher_metrics = Arc::into_inner(hasher)
                    .expect("Object state hasher should have no other references at this point")
                    .metrics();
                let new_hasher = Arc::new(GlobalStateHasher::new(
                    self.state.get_global_state_hash_store().clone(),
                    global_state_hasher_metrics,
                ));
                let weak_hasher = Arc::downgrade(&new_hasher);
                *hasher_guard = Some(new_hasher);

                consensus_store_pruner.prune(next_epoch).await;

                if new_role.runs_consensus() {
                    // Was validator, still runs consensus: reuse long-lived
                    // components and restart the epoch-specific ones.
                    info!("Restarting consensus as {new_role}");
                    Some(
                        Self::start_epoch_specific_validator_components(
                            &self.config,
                            self.state.clone(),
                            consensus_adapter,
                            self.checkpoint_store.clone(),
                            new_epoch_store.clone(),
                            self.state_sync_handle.clone(),
                            self.randomness_handle.clone(),
                            consensus_manager,
                            consensus_store_pruner,
                            weak_hasher,
                            self.backpressure_manager.clone(),
                            validator_server_handle,
                            validator_overload_monitor_handle,
                            checkpoint_metrics,
                            self.metrics.clone(),
                            sui_tx_validator_metrics,
                            admission_queue,
                            new_role,
                        )
                        .await?,
                    )
                } else {
                    // Was validator, demoted: drop all validator components.
                    info!(
                        "This node has new role {new_role} and no longer runs consensus after reconfiguration"
                    );
                    None
                }
            } else {
                // Case split 2: the node had no validator components last epoch.
                // No other components should be holding a strong reference to state hasher
                // at this point. Confirm here before we swap in the new hasher.
                let global_state_hasher_metrics = Arc::into_inner(hasher)
                    .expect("Object state hasher should have no other references at this point")
                    .metrics();
                let new_hasher = Arc::new(GlobalStateHasher::new(
                    self.state.get_global_state_hash_store().clone(),
                    global_state_hasher_metrics,
                ));
                let weak_hasher = Arc::downgrade(&new_hasher);
                *hasher_guard = Some(new_hasher);

                if new_role.runs_consensus() {
                    // Promoted: construct validator components from scratch.
                    info!("Promoting node to {new_role}, starting consensus components");

                    let mut components = Self::construct_validator_components(
                        self.config.clone(),
                        self.state.clone(),
                        Arc::new(next_epoch_committee.clone()),
                        new_epoch_store.clone(),
                        self.checkpoint_store.clone(),
                        self.state_sync_handle.clone(),
                        self.randomness_handle.clone(),
                        weak_hasher,
                        self.backpressure_manager.clone(),
                        &self.registry_service,
                        self.metrics.clone(),
                        self.checkpoint_metrics.clone(),
                        new_role,
                    )
                    .await?;

                    if new_role.is_validator() {
                        // Freshly constructed components carry a not-yet-started
                        // validator server handle; start it now.
                        components.validator_server_handle = Some(
                            components
                                .validator_server_handle
                                .take()
                                .unwrap()
                                .start()
                                .await,
                        );

                        self.endpoint_manager
                            .set_consensus_address_updater(components.consensus_manager.clone());
                    }

                    Some(components)
                } else {
                    // Stays a non-consensus node: nothing to do.
                    None
                }
            };
            *validator_components_lock_guard = new_validator_components;

            // Force releasing current epoch store DB handle, because the
            // Arc<AuthorityPerEpochStore> may linger.
            cur_epoch_store.release_db_handles();

            // In simtests with checkpoint pruning configured, exercise the
            // pruner at every epoch boundary.
            if cfg!(msim)
                && !matches!(
                    self.config
                        .authority_store_pruning_config
                        .num_epochs_to_retain_for_checkpoints(),
                    None | Some(u64::MAX) | Some(0)
                )
            {
                self.state
                    .prune_checkpoints_for_eligible_epochs_for_testing(
                        self.config.clone(),
                        sui_core::authority::authority_store_pruner::AuthorityStorePruningMetrics::new_for_test(),
                    )
                    .await?;
            }

            epoch_store = new_epoch_store;
            info!("Reconfiguration finished");
        }
    }
2004
2005    async fn shutdown(&self) {
2006        if let Some(validator_components) = &*self.validator_components.lock().await {
2007            validator_components.consensus_manager.shutdown().await;
2008        }
2009    }
2010
    /// Transitions `AuthorityState` from `cur_epoch_store`'s epoch to the next
    /// epoch described by `next_epoch_committee` / `next_epoch_start_system_state`,
    /// returning the new epoch store.
    ///
    /// Panics (via `expect`/`assert`) instead of returning errors: every
    /// failure mode here is an unrecoverable invariant violation during
    /// reconfiguration.
    async fn reconfigure_state(
        &self,
        state: &Arc<AuthorityState>,
        cur_epoch_store: &AuthorityPerEpochStore,
        next_epoch_committee: Committee,
        next_epoch_start_system_state: EpochStartSystemState,
        global_state_hasher: Arc<GlobalStateHasher>,
    ) -> Arc<AuthorityPerEpochStore> {
        let next_epoch = next_epoch_committee.epoch();

        let last_checkpoint = self
            .checkpoint_store
            .get_epoch_last_checkpoint(cur_epoch_store.epoch())
            .expect("Error loading last checkpoint for current epoch")
            .expect("Could not load last checkpoint for current epoch");

        let last_checkpoint_seq = *last_checkpoint.sequence_number();

        // Invariant: reconfiguration only starts once the closing epoch's
        // final checkpoint is the highest executed checkpoint.
        assert_eq!(
            Some(last_checkpoint_seq),
            self.checkpoint_store
                .get_highest_executed_checkpoint_seq_number()
                .expect("Error loading highest executed checkpoint sequence number")
        );

        let epoch_start_configuration = EpochStartConfiguration::new(
            next_epoch_start_system_state,
            *last_checkpoint.digest(),
            state.get_object_store().as_ref(),
            EpochFlag::default_flags_for_new_epoch(&state.config),
        )
        .expect("EpochStartConfiguration construction cannot fail");

        let new_epoch_store = self
            .state
            .reconfigure(
                cur_epoch_store,
                self.config.supported_protocol_versions.unwrap(),
                next_epoch_committee,
                epoch_start_configuration,
                global_state_hasher,
                &self.config.expensive_safety_check_config,
                last_checkpoint_seq,
            )
            .await
            .expect("Reconfigure authority state cannot fail");
        info!(next_epoch, "Node State has been reconfigured");
        assert_eq!(next_epoch, new_epoch_store.epoch());
        // Refresh epoch-flag metrics from the old/new flag sets.
        self.state.get_reconfig_api().update_epoch_flags_metrics(
            cur_epoch_store.epoch_start_config().flags(),
            new_epoch_store.epoch_start_config().flags(),
        );

        new_epoch_store
    }
2066
    /// Returns a reference to the node's configuration.
    pub fn get_config(&self) -> &NodeConfig {
        &self.config
    }
2070
    /// Returns a clone of the randomness subsystem handle.
    pub fn randomness_handle(&self) -> randomness::Handle {
        self.randomness_handle.clone()
    }
2074
    /// Returns a clone of the state sync subsystem handle.
    pub fn state_sync_handle(&self) -> state_sync::Handle {
        self.state_sync_handle.clone()
    }
2078
    /// Returns a reference to the node's endpoint manager.
    pub fn endpoint_manager(&self) -> &EndpointManager {
        &self.endpoint_manager
    }
2082
2083    /// Get a short prefix of a digest for metric labels
2084    fn get_digest_prefix(digest: impl std::fmt::Display) -> String {
2085        let digest_str = digest.to_string();
2086        if digest_str.len() >= 8 {
2087            digest_str[0..8].to_string()
2088        } else {
2089            digest_str
2090        }
2091    }
2092
2093    /// Check for previously detected forks and handle them appropriately.
2094    /// For validators with fork recovery config, clear the fork if it matches the recovery config.
2095    /// For all other cases, block node startup if a fork is detected.
2096    async fn check_and_recover_forks(
2097        checkpoint_store: &CheckpointStore,
2098        checkpoint_metrics: &CheckpointMetrics,
2099        fork_recovery: Option<&ForkRecoveryConfig>,
2100    ) -> Result<()> {
2101        // Try to recover from forks if recovery config is provided
2102        if let Some(recovery) = fork_recovery {
2103            Self::try_recover_checkpoint_fork(checkpoint_store, recovery)?;
2104            Self::try_recover_transaction_fork(checkpoint_store, recovery)?;
2105        }
2106
2107        if let Some((checkpoint_seq, checkpoint_digest)) = checkpoint_store
2108            .get_checkpoint_fork_detected()
2109            .map_err(|e| {
2110                error!("Failed to check for checkpoint fork: {:?}", e);
2111                e
2112            })?
2113        {
2114            Self::handle_checkpoint_fork(
2115                checkpoint_seq,
2116                checkpoint_digest,
2117                checkpoint_metrics,
2118                fork_recovery,
2119            )
2120            .await?;
2121        }
2122        if let Some((tx_digest, expected_effects, actual_effects)) = checkpoint_store
2123            .get_transaction_fork_detected()
2124            .map_err(|e| {
2125                error!("Failed to check for transaction fork: {:?}", e);
2126                e
2127            })?
2128        {
2129            Self::handle_transaction_fork(
2130                tx_digest,
2131                expected_effects,
2132                actual_effects,
2133                checkpoint_metrics,
2134                fork_recovery,
2135            )
2136            .await?;
2137        }
2138
2139        Ok(())
2140    }
2141
    /// Applies operator-supplied checkpoint overrides from `recovery`.
    ///
    /// For each `(seq, digest)` override whose locally computed checkpoint
    /// digest mismatches the expected digest, clears locally computed
    /// checkpoints from that sequence onward; afterwards, clears the persisted
    /// fork-detected marker if it points at an overridden sequence.
    fn try_recover_checkpoint_fork(
        checkpoint_store: &CheckpointStore,
        recovery: &ForkRecoveryConfig,
    ) -> Result<()> {
        // If configured overrides include a checkpoint whose locally computed digest mismatches,
        // clear locally computed checkpoints from that sequence (inclusive).
        for (seq, expected_digest_str) in &recovery.checkpoint_overrides {
            // Overrides arrive as strings from config; reject malformed digests early.
            let Ok(expected_digest) = CheckpointDigest::from_str(expected_digest_str) else {
                anyhow::bail!(
                    "Invalid checkpoint digest override for seq {}: {}",
                    seq,
                    expected_digest_str
                );
            };

            if let Some(local_summary) = checkpoint_store.get_locally_computed_checkpoint(*seq)? {
                let local_digest = sui_types::message_envelope::Message::digest(&local_summary);
                if local_digest != expected_digest {
                    info!(
                        seq,
                        local = %Self::get_digest_prefix(local_digest),
                        expected = %Self::get_digest_prefix(expected_digest),
                        "Fork recovery: clearing locally_computed_checkpoints from {} due to digest mismatch",
                        seq
                    );
                    checkpoint_store
                        .clear_locally_computed_checkpoints_from(*seq)
                        .context(
                            "Failed to clear locally computed checkpoints from override seq",
                        )?;
                }
            }
        }

        // Only clear the persisted fork marker when the forked sequence was
        // explicitly overridden by the operator.
        if let Some((checkpoint_seq, checkpoint_digest)) =
            checkpoint_store.get_checkpoint_fork_detected()?
            && recovery.checkpoint_overrides.contains_key(&checkpoint_seq)
        {
            info!(
                "Fork recovery enabled: clearing checkpoint fork at seq {} with digest {:?}",
                checkpoint_seq, checkpoint_digest
            );
            checkpoint_store
                .clear_checkpoint_fork_detected()
                .expect("Failed to clear checkpoint fork detected marker");
        }
        Ok(())
    }
2190
2191    fn try_recover_transaction_fork(
2192        checkpoint_store: &CheckpointStore,
2193        recovery: &ForkRecoveryConfig,
2194    ) -> Result<()> {
2195        if recovery.transaction_overrides.is_empty() {
2196            return Ok(());
2197        }
2198
2199        if let Some((tx_digest, _, _)) = checkpoint_store.get_transaction_fork_detected()?
2200            && recovery
2201                .transaction_overrides
2202                .contains_key(&tx_digest.to_string())
2203        {
2204            info!(
2205                "Fork recovery enabled: clearing transaction fork for tx {:?}",
2206                tx_digest
2207            );
2208            checkpoint_store
2209                .clear_transaction_fork_detected()
2210                .expect("Failed to clear transaction fork detected marker");
2211        }
2212        Ok(())
2213    }
2214
2215    fn get_current_timestamp() -> u64 {
2216        std::time::SystemTime::now()
2217            .duration_since(std::time::SystemTime::UNIX_EPOCH)
2218            .unwrap()
2219            .as_secs()
2220    }
2221
2222    async fn handle_checkpoint_fork(
2223        checkpoint_seq: u64,
2224        checkpoint_digest: CheckpointDigest,
2225        checkpoint_metrics: &CheckpointMetrics,
2226        fork_recovery: Option<&ForkRecoveryConfig>,
2227    ) -> Result<()> {
2228        checkpoint_metrics
2229            .checkpoint_fork_crash_mode
2230            .with_label_values(&[
2231                &checkpoint_seq.to_string(),
2232                &Self::get_digest_prefix(checkpoint_digest),
2233                &Self::get_current_timestamp().to_string(),
2234            ])
2235            .set(1);
2236
2237        let behavior = fork_recovery
2238            .map(|fr| fr.fork_crash_behavior)
2239            .unwrap_or_default();
2240
2241        match behavior {
2242            ForkCrashBehavior::AwaitForkRecovery => {
2243                error!(
2244                    checkpoint_seq = checkpoint_seq,
2245                    checkpoint_digest = ?checkpoint_digest,
2246                    "Checkpoint fork detected! Node startup halted. Sleeping indefinitely."
2247                );
2248                futures::future::pending::<()>().await;
2249                unreachable!("pending() should never return");
2250            }
2251            ForkCrashBehavior::ReturnError => {
2252                error!(
2253                    checkpoint_seq = checkpoint_seq,
2254                    checkpoint_digest = ?checkpoint_digest,
2255                    "Checkpoint fork detected! Returning error."
2256                );
2257                Err(anyhow::anyhow!(
2258                    "Checkpoint fork detected! checkpoint_seq: {}, checkpoint_digest: {:?}",
2259                    checkpoint_seq,
2260                    checkpoint_digest
2261                ))
2262            }
2263        }
2264    }
2265
2266    async fn handle_transaction_fork(
2267        tx_digest: TransactionDigest,
2268        expected_effects_digest: TransactionEffectsDigest,
2269        actual_effects_digest: TransactionEffectsDigest,
2270        checkpoint_metrics: &CheckpointMetrics,
2271        fork_recovery: Option<&ForkRecoveryConfig>,
2272    ) -> Result<()> {
2273        checkpoint_metrics
2274            .transaction_fork_crash_mode
2275            .with_label_values(&[
2276                &Self::get_digest_prefix(tx_digest),
2277                &Self::get_digest_prefix(expected_effects_digest),
2278                &Self::get_digest_prefix(actual_effects_digest),
2279                &Self::get_current_timestamp().to_string(),
2280            ])
2281            .set(1);
2282
2283        let behavior = fork_recovery
2284            .map(|fr| fr.fork_crash_behavior)
2285            .unwrap_or_default();
2286
2287        match behavior {
2288            ForkCrashBehavior::AwaitForkRecovery => {
2289                error!(
2290                    tx_digest = ?tx_digest,
2291                    expected_effects_digest = ?expected_effects_digest,
2292                    actual_effects_digest = ?actual_effects_digest,
2293                    "Transaction fork detected! Node startup halted. Sleeping indefinitely."
2294                );
2295                futures::future::pending::<()>().await;
2296                unreachable!("pending() should never return");
2297            }
2298            ForkCrashBehavior::ReturnError => {
2299                error!(
2300                    tx_digest = ?tx_digest,
2301                    expected_effects_digest = ?expected_effects_digest,
2302                    actual_effects_digest = ?actual_effects_digest,
2303                    "Transaction fork detected! Returning error."
2304                );
2305                Err(anyhow::anyhow!(
2306                    "Transaction fork detected! tx_digest: {:?}, expected_effects: {:?}, actual_effects: {:?}",
2307                    tx_digest,
2308                    expected_effects_digest,
2309                    actual_effects_digest
2310                ))
2311            }
2312        }
2313    }
2314}
2315
2316#[cfg(not(msim))]
2317impl SuiNode {
2318    async fn fetch_jwks(
2319        _authority: AuthorityName,
2320        provider: &OIDCProvider,
2321    ) -> SuiResult<Vec<(JwkId, JWK)>> {
2322        use fastcrypto_zkp::bn254::zk_login::fetch_jwks;
2323        use sui_types::error::SuiErrorKind;
2324        let client = reqwest::Client::new();
2325        fetch_jwks(provider, &client, true)
2326            .await
2327            .map_err(|_| SuiErrorKind::JWKRetrievalError.into())
2328    }
2329}
2330
#[cfg(msim)]
impl SuiNode {
    /// Returns the id of the simulator node this `SuiNode` runs on.
    pub fn get_sim_node_id(&self) -> sui_simulator::task::NodeId {
        self.sim_state.sim_node.id()
    }

    /// Records whether safe mode is expected, by storing `new_value` into the
    /// shared `sim_safe_mode_expected` atomic. Relaxed ordering is used — the
    /// value is a standalone advisory flag with no ordering dependencies.
    pub fn set_safe_mode_expected(&self, new_value: bool) {
        info!("Setting safe mode expected to {}", new_value);
        self.sim_state
            .sim_safe_mode_expected
            .store(new_value, Ordering::Relaxed);
    }

    /// Simulator variant of JWK fetching: delegates to the injectable
    /// provider returned by `get_jwk_injector()` instead of performing a
    /// real HTTP fetch, so tests can control the JWKs each authority sees.
    #[allow(unused_variables)]
    async fn fetch_jwks(
        authority: AuthorityName,
        provider: &OIDCProvider,
    ) -> SuiResult<Vec<(JwkId, JWK)>> {
        get_jwk_injector()(authority, provider)
    }
}
2352
/// A deferred background task: holds a future plus a readiness signal until
/// `start` is called, after which it holds the spawned task's join handle.
enum SpawnOnce {
    // Not yet spawned: the readiness receiver and the future to run.
    // Mutex is only needed to make SpawnOnce Send
    Unstarted(oneshot::Receiver<()>, Mutex<BoxFuture<'static, ()>>),
    // Spawned: the handle is retained but never awaited here.
    #[allow(unused)]
    Started(JoinHandle<()>),
}
2359
2360impl SpawnOnce {
2361    pub fn new(
2362        ready_rx: oneshot::Receiver<()>,
2363        future: impl Future<Output = ()> + Send + 'static,
2364    ) -> Self {
2365        Self::Unstarted(ready_rx, Mutex::new(Box::pin(future)))
2366    }
2367
2368    pub async fn start(self) -> Self {
2369        match self {
2370            Self::Unstarted(ready_rx, future) => {
2371                let future = future.into_inner();
2372                let handle = tokio::spawn(future);
2373                ready_rx.await.unwrap();
2374                Self::Started(handle)
2375            }
2376            Self::Started(_) => self,
2377        }
2378    }
2379}
2380
2381/// Updates trusted peer addresses in the p2p network.
2382fn update_peer_addresses(
2383    config: &NodeConfig,
2384    endpoint_manager: &EndpointManager,
2385    epoch_start_state: &EpochStartSystemState,
2386) {
2387    for (peer_id, address) in
2388        epoch_start_state.get_validator_as_p2p_peers(config.protocol_public_key())
2389    {
2390        endpoint_manager
2391            .update_endpoint(
2392                EndpointId::P2p(peer_id),
2393                AddressSource::Chain,
2394                vec![address],
2395            )
2396            .expect("Updating peer addresses should not fail");
2397    }
2398}
2399
2400fn build_kv_store(
2401    state: &Arc<AuthorityState>,
2402    config: &NodeConfig,
2403    registry: &Registry,
2404) -> Result<Arc<TransactionKeyValueStore>> {
2405    let metrics = KeyValueStoreMetrics::new(registry);
2406    let db_store = TransactionKeyValueStore::new("rocksdb", metrics.clone(), state.clone());
2407
2408    let base_url = &config.transaction_kv_store_read_config.base_url;
2409
2410    if base_url.is_empty() {
2411        info!("no http kv store url provided, using local db only");
2412        return Ok(Arc::new(db_store));
2413    }
2414
2415    let base_url: url::Url = base_url.parse().tap_err(|e| {
2416        error!(
2417            "failed to parse config.transaction_kv_store_config.base_url ({:?}) as url: {}",
2418            base_url, e
2419        )
2420    })?;
2421
2422    let network_str = match state.get_chain_identifier().chain() {
2423        Chain::Mainnet => "/mainnet",
2424        _ => {
2425            info!("using local db only for kv store");
2426            return Ok(Arc::new(db_store));
2427        }
2428    };
2429
2430    let base_url = base_url.join(network_str)?.to_string();
2431    let http_store = HttpKVStore::new_kv(
2432        &base_url,
2433        config.transaction_kv_store_read_config.cache_size,
2434        metrics.clone(),
2435    )?;
2436    info!("using local key-value store with fallback to http key-value store");
2437    Ok(Arc::new(FallbackTransactionKVStore::new_kv(
2438        db_store,
2439        http_store,
2440        metrics,
2441        "json_rpc_fallback",
2442    )))
2443}
2444
/// Builds the HTTP/HTTPS RPC servers (legacy JSON-RPC modules plus the
/// `sui_rpc_api` service) for nodes whose role exposes RPC.
///
/// Returns the server handles and, when servers are started, the sender used
/// to feed checkpoints to the subscription service. Roles for which
/// `node_role.should_run_rpc_servers()` is false get default (empty) handles
/// and no sender.
async fn build_http_servers(
    state: Arc<AuthorityState>,
    store: RocksDbStore,
    transaction_orchestrator: &Option<Arc<TransactionOrchestrator<NetworkAuthorityClient>>>,
    config: &NodeConfig,
    prometheus_registry: &Registry,
    server_version: ServerVersion,
    node_role: NodeRole,
) -> Result<(HttpServers, Option<tokio::sync::mpsc::Sender<Checkpoint>>)> {
    // Validators do not expose these APIs
    if !node_role.should_run_rpc_servers() {
        return Ok((HttpServers::default(), None));
    }

    info!("starting rpc service with config: {:?}", config.rpc);

    let mut router = axum::Router::new();

    // Assemble the legacy JSON-RPC modules into a single router.
    let json_rpc_router = {
        let traffic_controller = state.traffic_controller.clone();
        let mut server = JsonRpcServerBuilder::new(
            env!("CARGO_PKG_VERSION"),
            prometheus_registry,
            traffic_controller,
            config.policy_config.clone(),
        );

        let kv_store = build_kv_store(&state, config, prometheus_registry)?;

        let metrics = Arc::new(JsonRpcMetrics::new(prometheus_registry));
        server.register_module(ReadApi::new(
            state.clone(),
            kv_store.clone(),
            metrics.clone(),
        ))?;
        server.register_module(CoinReadApi::new(
            state.clone(),
            kv_store.clone(),
            metrics.clone(),
        ))?;

        // if run_with_range is enabled we want to prevent any transactions
        // run_with_range = None is normal operating conditions
        if config.run_with_range.is_none() {
            server.register_module(TransactionBuilderApi::new(state.clone()))?;
        }
        server.register_module(GovernanceReadApi::new(state.clone(), metrics.clone()))?;
        server.register_module(BridgeReadApi::new(state.clone(), metrics.clone()))?;

        // Transaction execution is only exposed when an orchestrator exists.
        if let Some(transaction_orchestrator) = transaction_orchestrator {
            server.register_module(TransactionExecutionApi::new(
                state.clone(),
                transaction_orchestrator.clone(),
                metrics.clone(),
            ))?;
        }

        // Explicit name-service object ids in the node config win; otherwise
        // fall back to the per-chain defaults.
        let name_service_config =
            if let (Some(package_address), Some(registry_id), Some(reverse_registry_id)) = (
                config.name_service_package_address,
                config.name_service_registry_id,
                config.name_service_reverse_registry_id,
            ) {
                sui_name_service::NameServiceConfig::new(
                    package_address,
                    registry_id,
                    reverse_registry_id,
                )
            } else {
                match state.get_chain_identifier().chain() {
                    Chain::Mainnet => sui_name_service::NameServiceConfig::mainnet(),
                    Chain::Testnet => sui_name_service::NameServiceConfig::testnet(),
                    Chain::Unknown => sui_name_service::NameServiceConfig::default(),
                }
            };

        server.register_module(IndexerApi::new(
            state.clone(),
            ReadApi::new(state.clone(), kv_store.clone(), metrics.clone()),
            kv_store,
            name_service_config,
            metrics,
            config.indexer_max_subscriptions,
        ))?;
        server.register_module(MoveUtils::new(state.clone()))?;

        let server_type = config.jsonrpc_server_type();

        server.to_router(server_type).await?
    };

    router = router.merge(json_rpc_router);

    // The returned sender is how callers push checkpoints into the
    // subscription service; its handle is wired into the rpc service below.
    let (subscription_service_checkpoint_sender, subscription_service_handle) =
        SubscriptionService::build(prometheus_registry);
    let rpc_router = {
        let mut rpc_service =
            sui_rpc_api::RpcService::new(Arc::new(RestReadStore::new(state.clone(), store)));
        rpc_service.with_server_version(server_version);

        if let Some(config) = config.rpc.clone() {
            rpc_service.with_config(config);
        }

        rpc_service.with_metrics(RpcMetrics::new(prometheus_registry));
        rpc_service.with_subscription_service(subscription_service_handle);

        if let Some(transaction_orchestrator) = transaction_orchestrator {
            rpc_service.with_executor(transaction_orchestrator.clone())
        }

        rpc_service.into_router().await
    };

    let layers = ServiceBuilder::new()
        // Bridge sui_http's ConnectInfo into axum's ConnectInfo extension so
        // handlers can read the remote address.
        .map_request(|mut request: axum::http::Request<_>| {
            if let Some(connect_info) = request.extensions().get::<sui_http::ConnectInfo>() {
                let axum_connect_info = axum::extract::ConnectInfo(connect_info.remote_addr);
                request.extensions_mut().insert(axum_connect_info);
            }
            request
        })
        .layer(axum::middleware::from_fn(server_timing_middleware))
        // Setup a permissive CORS policy
        .layer(
            tower_http::cors::CorsLayer::new()
                .allow_methods([http::Method::GET, http::Method::POST])
                .allow_origin(tower_http::cors::Any)
                .allow_headers(tower_http::cors::Any)
                .expose_headers(tower_http::cors::Any),
        );

    router = router.merge(rpc_router).layer(layers);

    // HTTPS is only served when the rpc config carries a TLS configuration.
    let https = if let Some((tls_config, https_address)) = config
        .rpc()
        .and_then(|config| config.tls_config().map(|tls| (tls, config.https_address())))
    {
        let https = sui_http::Builder::new()
            .tls_single_cert(tls_config.cert(), tls_config.key())
            .and_then(|builder| builder.serve(https_address, router.clone()))
            .map_err(|e| anyhow::anyhow!(e))?;

        info!(
            https_address =? https.local_addr(),
            "HTTPS rpc server listening on {}",
            https.local_addr()
        );

        Some(https)
    } else {
        None
    };

    // Plain HTTP is always served on the configured json_rpc_address.
    let http = sui_http::Builder::new()
        .serve(&config.json_rpc_address, router)
        .map_err(|e| anyhow::anyhow!(e))?;

    info!(
        http_address =? http.local_addr(),
        "HTTP rpc server listening on {}",
        http.local_addr()
    );

    Ok((
        HttpServers {
            http: Some(http),
            https,
        },
        Some(subscription_service_checkpoint_sender),
    ))
}
2617
2618#[cfg(not(test))]
2619fn max_tx_per_checkpoint(protocol_config: &ProtocolConfig) -> usize {
2620    protocol_config.max_transactions_per_checkpoint() as usize
2621}
2622
/// Test-build override: caps checkpoints at 2 transactions regardless of the
/// protocol config — presumably so tests hit checkpoint boundaries with tiny
/// workloads (TODO confirm against the checkpoint builder tests).
#[cfg(test)]
fn max_tx_per_checkpoint(_: &ProtocolConfig) -> usize {
    2
}
2627
/// Handles to the node's HTTP and HTTPS RPC servers. The `#[allow(unused)]`
/// fields are never read — presumably they are retained only so the server
/// handles stay alive for the node's lifetime.
#[derive(Default)]
struct HttpServers {
    #[allow(unused)]
    http: Option<sui_http::ServerHandle>,
    #[allow(unused)]
    https: Option<sui_http::ServerHandle>,
}
2635
#[cfg(test)]
mod tests {
    use super::*;
    use prometheus::Registry;
    use std::collections::BTreeMap;
    use sui_config::node::{ForkCrashBehavior, ForkRecoveryConfig};
    use sui_core::checkpoints::{CheckpointMetrics, CheckpointStore};
    use sui_types::digests::{CheckpointDigest, TransactionDigest, TransactionEffectsDigest};

    /// Exercises `check_and_recover_forks` for both fork kinds: with
    /// `ReturnError` behavior a recorded fork surfaces as an error, and a
    /// matching recovery override clears the persisted fork marker.
    #[tokio::test]
    async fn test_fork_error_and_recovery_both_paths() {
        let checkpoint_store = CheckpointStore::new_for_tests();
        let checkpoint_metrics = CheckpointMetrics::new(&Registry::new());

        // ---------- Checkpoint fork path ----------
        let seq_num = 42;
        let digest = CheckpointDigest::random();
        checkpoint_store
            .record_checkpoint_fork_detected(seq_num, digest)
            .unwrap();

        // No override configured: the recorded fork must surface as an error.
        let fork_recovery = ForkRecoveryConfig {
            transaction_overrides: Default::default(),
            checkpoint_overrides: Default::default(),
            fork_crash_behavior: ForkCrashBehavior::ReturnError,
        };

        let r = SuiNode::check_and_recover_forks(
            &checkpoint_store,
            &checkpoint_metrics,
            Some(&fork_recovery),
        )
        .await;
        assert!(r.is_err());
        assert!(
            r.unwrap_err()
                .to_string()
                .contains("Checkpoint fork detected")
        );

        // An override matching the forked checkpoint's seq/digest should
        // recover successfully and clear the persisted marker.
        let mut checkpoint_overrides = BTreeMap::new();
        checkpoint_overrides.insert(seq_num, digest.to_string());
        let fork_recovery_with_override = ForkRecoveryConfig {
            transaction_overrides: Default::default(),
            checkpoint_overrides,
            fork_crash_behavior: ForkCrashBehavior::ReturnError,
        };
        let r = SuiNode::check_and_recover_forks(
            &checkpoint_store,
            &checkpoint_metrics,
            Some(&fork_recovery_with_override),
        )
        .await;
        assert!(r.is_ok());
        assert!(
            checkpoint_store
                .get_checkpoint_fork_detected()
                .unwrap()
                .is_none()
        );

        // ---------- Transaction fork path ----------
        let tx_digest = TransactionDigest::random();
        let expected_effects = TransactionEffectsDigest::random();
        let actual_effects = TransactionEffectsDigest::random();
        checkpoint_store
            .record_transaction_fork_detected(tx_digest, expected_effects, actual_effects)
            .unwrap();

        // No override configured: the recorded fork must surface as an error.
        let fork_recovery = ForkRecoveryConfig {
            transaction_overrides: Default::default(),
            checkpoint_overrides: Default::default(),
            fork_crash_behavior: ForkCrashBehavior::ReturnError,
        };
        let r = SuiNode::check_and_recover_forks(
            &checkpoint_store,
            &checkpoint_metrics,
            Some(&fork_recovery),
        )
        .await;
        assert!(r.is_err());
        assert!(
            r.unwrap_err()
                .to_string()
                .contains("Transaction fork detected")
        );

        // An override mapping the tx digest to the actual effects digest
        // should recover successfully and clear the persisted marker.
        let mut transaction_overrides = BTreeMap::new();
        transaction_overrides.insert(tx_digest.to_string(), actual_effects.to_string());
        let fork_recovery_with_override = ForkRecoveryConfig {
            transaction_overrides,
            checkpoint_overrides: Default::default(),
            fork_crash_behavior: ForkCrashBehavior::ReturnError,
        };
        let r = SuiNode::check_and_recover_forks(
            &checkpoint_store,
            &checkpoint_metrics,
            Some(&fork_recovery_with_override),
        )
        .await;
        assert!(r.is_ok());
        assert!(
            checkpoint_store
                .get_transaction_fork_detected()
                .unwrap()
                .is_none()
        );
    }
}