sui_core/consensus_manager/mod.rs

use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore;
use crate::consensus_adapter::{BlockStatusReceiver, ConsensusClient};
use crate::consensus_handler::{
    ConsensusBlockHandler, ConsensusHandlerInitializer, MysticetiConsensusHandler,
};
use crate::consensus_validator::SuiTxValidator;
use crate::mysticeti_adapter::LazyMysticetiClient;
use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use consensus_config::{Committee, NetworkKeyPair, Parameters, ProtocolKeyPair};
use consensus_core::{
    Clock, CommitConsumerArgs, CommitConsumerMonitor, CommitIndex, ConsensusAuthority, NetworkType,
};
use core::panic;
use fastcrypto::traits::KeyPair as _;
use mysten_metrics::{RegistryID, RegistryService};
use prometheus::{IntGauge, Registry, register_int_gauge_with_registry};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{Duration, Instant};
use sui_config::{ConsensusConfig, NodeConfig};
use sui_protocol_config::ProtocolVersion;
use sui_types::error::SuiResult;
use sui_types::messages_consensus::{ConsensusPosition, ConsensusTransaction};
use sui_types::{
    committee::EpochId, sui_system_state::epoch_start_sui_system_state::EpochStartSystemStateTrait,
};
use tokio::sync::{Mutex, broadcast};
use tokio::time::{sleep, timeout};
use tracing::{error, info};

#[cfg(test)]
#[path = "../unit_tests/consensus_manager_tests.rs"]
pub mod consensus_manager_tests;

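/// Whether consensus is running, and if so, for which epoch and protocol version.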
#[derive(PartialEq)]
enum Running {
    True(EpochId, ProtocolVersion),
    False,
}

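/// Manages the lifecycle of the Mysticeti consensus authority: starting it with
/// per-epoch configuration, wiring up the commit and block handlers, and shutting
/// it down at epoch end.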
pub struct ConsensusManager {
    consensus_config: ConsensusConfig,
    protocol_keypair: ProtocolKeyPair,
    network_keypair: NetworkKeyPair,
    storage_base_path: PathBuf,
    metrics: Arc<ConsensusManagerMetrics>,
    registry_service: RegistryService,
    authority: ArcSwapOption<(ConsensusAuthority, RegistryID)>,

    // Lazy client pointed at the transaction client of the current consensus
    // authority; set on start() and cleared on shutdown().
    client: Arc<LazyMysticetiClient>,
    // The client handed to the rest of the system, wrapping `client` above.
    consensus_client: Arc<UpdatableConsensusClient>,

    consensus_handler: Mutex<Option<MysticetiConsensusHandler>>,

    #[cfg(test)]
    pub(crate) consumer_monitor: ArcSwapOption<CommitConsumerMonitor>,
    #[cfg(not(test))]
    consumer_monitor: ArcSwapOption<CommitConsumerMonitor>,
    consumer_monitor_sender: broadcast::Sender<Arc<CommitConsumerMonitor>>,

    running: Mutex<Running>,

    #[cfg(test)]
    pub(crate) boot_counter: Mutex<u64>,
    #[cfg(not(test))]
    boot_counter: Mutex<u64>,
}

impl ConsensusManager {
    pub fn new(
        node_config: &NodeConfig,
        consensus_config: &ConsensusConfig,
        registry_service: &RegistryService,
        consensus_client: Arc<UpdatableConsensusClient>,
    ) -> Self {
        let metrics = Arc::new(ConsensusManagerMetrics::new(
            &registry_service.default_registry(),
        ));
        let client = Arc::new(LazyMysticetiClient::new());
        let (consumer_monitor_sender, _) = broadcast::channel(1);
        Self {
            consensus_config: consensus_config.clone(),
            protocol_keypair: ProtocolKeyPair::new(node_config.worker_key_pair().copy()),
            network_keypair: NetworkKeyPair::new(node_config.network_key_pair().copy()),
            storage_base_path: consensus_config.db_path().to_path_buf(),
            metrics,
            registry_service: registry_service.clone(),
            authority: ArcSwapOption::empty(),
            client,
            consensus_client,
            consensus_handler: Mutex::new(None),
            consumer_monitor: ArcSwapOption::empty(),
            consumer_monitor_sender,
            running: Mutex::new(Running::False),
            boot_counter: Mutex::new(0),
        }
    }

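    /// Starts the consensus authority for the current epoch of `epoch_store`.
    /// Logs an error and returns without doing anything if consensus is already
    /// running.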
    pub async fn start(
        &self,
        node_config: &NodeConfig,
        epoch_store: Arc<AuthorityPerEpochStore>,
        consensus_handler_initializer: ConsensusHandlerInitializer,
        tx_validator: SuiTxValidator,
    ) {
        let system_state = epoch_store.epoch_start_state();
        let committee: Committee = system_state.get_consensus_committee();
        let epoch = epoch_store.epoch();
        let protocol_config = epoch_store.protocol_config();

        let start_time = Instant::now();
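        // Hold the `running` lock for the entire startup, so concurrent start()
        // and shutdown() calls are serialized.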
        let mut running = self.running.lock().await;
        if let Running::True(running_epoch, running_version) = *running {
            error!(
                "Consensus is already running for epoch {running_epoch:?} & protocol version {running_version:?} - shutdown first before starting",
            );
            return;
        }
        *running = Running::True(epoch, protocol_config.version);

        info!(
            "Starting up consensus for epoch {epoch:?} & protocol version {:?}",
            protocol_config.version
        );

        self.consensus_client.set(self.client.clone());

        let consensus_config = node_config
            .consensus_config()
            .expect("consensus_config should exist");

        let parameters = Parameters {
            db_path: self.get_store_path(epoch),
            ..consensus_config.parameters.clone().unwrap_or_default()
        };

        let own_protocol_key = self.protocol_keypair.public();
        let (own_index, _) = committee
            .authorities()
            .find(|(_, a)| a.protocol_key == own_protocol_key)
            .expect("Own authority should be among the consensus authorities!");

        let registry = Registry::new_custom(Some("consensus".to_string()), None).unwrap();

        let consensus_handler = consensus_handler_initializer.new_consensus_handler();

        let num_prior_commits = protocol_config.consensus_num_requested_prior_commits_at_startup();
        let last_processed_commit_index =
            consensus_handler.last_processed_subdag_index() as CommitIndex;
        let replay_after_commit_index =
            last_processed_commit_index.saturating_sub(num_prior_commits);

        let (commit_consumer, commit_receiver, block_receiver) =
            CommitConsumerArgs::new(replay_after_commit_index, last_processed_commit_index);
        let monitor = commit_consumer.monitor();

        let consensus_block_handler = ConsensusBlockHandler::new(
            epoch_store.clone(),
            consensus_handler.execution_scheduler_sender().clone(),
            consensus_handler_initializer.backpressure_subscriber(),
            consensus_handler_initializer.metrics().clone(),
        );
        let handler = MysticetiConsensusHandler::new(
            last_processed_commit_index,
            consensus_handler,
            consensus_block_handler,
            commit_receiver,
            block_receiver,
            monitor.clone(),
        );
        let mut consensus_handler = self.consensus_handler.lock().await;
        *consensus_handler = Some(handler);

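        // Determine whether this node processed any commits during the previous
        // run: if a consumer monitor from an earlier epoch exists and handled at
        // least one commit, the node participated in that run.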
        let participated_on_previous_run =
            if let Some(previous_monitor) = self.consumer_monitor.swap(Some(monitor.clone())) {
                previous_monitor.highest_handled_commit() > 0
            } else {
                false
            };

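        // Only increment the boot counter when the node actually participated in
        // the previous run; a node that started but never handled a commit keeps
        // its current counter.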
        let mut boot_counter = self.boot_counter.lock().await;
        if participated_on_previous_run {
            *boot_counter += 1;
        } else {
            info!(
                "Node has not participated in previous epoch consensus. Boot counter ({}) will not increment.",
                *boot_counter
            );
        }

        let authority = ConsensusAuthority::start(
            NetworkType::Tonic,
            epoch_store.epoch_start_config().epoch_start_timestamp_ms(),
            own_index,
            committee.clone(),
            parameters.clone(),
            protocol_config.clone(),
            self.protocol_keypair.clone(),
            self.network_keypair.clone(),
            Arc::new(Clock::default()),
            Arc::new(tx_validator.clone()),
            commit_consumer,
            registry.clone(),
            *boot_counter,
        )
        .await;
        let client = authority.transaction_client();

        let registry_id = self.registry_service.add(registry.clone());

        let registered_authority = Arc::new((authority, registry_id));
        self.authority.swap(Some(registered_authority.clone()));

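        // Point the lazy client at the new authority only after it has fully
        // started, so submissions waiting on it can proceed.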
        self.client.set(client);

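        // Notify any ReplayWaiter subscribers of the new commit consumer monitor.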
        let _ = self.consumer_monitor_sender.send(monitor);

        let elapsed = start_time.elapsed().as_secs_f64();
        self.metrics.start_latency.set(elapsed as i64);

        tracing::info!(
            "Start of consensus for epoch {} & protocol version {:?} completed - took {} seconds",
            epoch,
            protocol_config.version,
            elapsed
        );
    }

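    /// Shuts down the running consensus authority and tears down its handlers,
    /// metrics registry, and clients. Logs an error and returns if consensus is
    /// not running.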
    pub async fn shutdown(&self) {
        info!("Shutting down consensus ...");

        let start_time = Instant::now();
        let mut running = self.running.lock().await;
        let (shutdown_epoch, shutdown_version) = match *running {
            Running::True(epoch, version) => {
                tracing::info!(
                    "Shutting down consensus for epoch {epoch:?} & protocol version {version:?}"
                );
                *running = Running::False;
                (epoch, version)
            }
            Running::False => {
                error!("Consensus shutdown was called but consensus is not running");
                return;
            }
        };

        self.client.clear();

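        // Take back sole ownership of the authority; at this point no other
        // references to it should remain.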
        let r = self.authority.swap(None).unwrap();
        let Ok((authority, registry_id)) = Arc::try_unwrap(r) else {
            panic!("Failed to retrieve the Mysticeti authority");
        };

        authority.stop().await;

        // Stop the commit and block handler tasks.
        let mut consensus_handler = self.consensus_handler.lock().await;
        if let Some(mut handler) = consensus_handler.take() {
            handler.abort().await;
        }

        self.registry_service.remove(registry_id);

        self.consensus_client.clear();

        let elapsed = start_time.elapsed().as_secs_f64();
        self.metrics.shutdown_latency.set(elapsed as i64);

        tracing::info!(
            "Shutdown of consensus for epoch {shutdown_epoch:?} & protocol version {shutdown_version:?} completed - took {} seconds",
            elapsed
        );
    }

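    /// Reports whether consensus is currently running.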
    pub async fn is_running(&self) -> bool {
        let running = self.running.lock().await;
        matches!(*running, Running::True(_, _))
    }

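    /// Returns a waiter that completes once the consensus handler has finished
    /// replaying previously committed subdags after a (re)start.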
    pub fn replay_waiter(&self) -> ReplayWaiter {
        let consumer_monitor_receiver = self.consumer_monitor_sender.subscribe();
        ReplayWaiter::new(consumer_monitor_receiver)
    }

    pub fn get_storage_base_path(&self) -> PathBuf {
        self.consensus_config.db_path().to_path_buf()
    }

    fn get_store_path(&self, epoch: EpochId) -> PathBuf {
        let mut store_path = self.storage_base_path.clone();
        store_path.push(format!("{}", epoch));
        store_path
    }
}

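/// A ConsensusClient whose inner client can be swapped at runtime: it is set when
/// consensus starts for an epoch and cleared on shutdown. Submissions made while
/// no client is set block until one becomes available, panicking after the
/// startup timeout elapses.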
#[derive(Default)]
pub struct UpdatableConsensusClient {
    // The extra Arc layer is needed because `Arc<dyn ConsensusClient>` is a fat
    // pointer, which ArcSwapOption cannot swap atomically on its own.
    client: ArcSwapOption<Arc<dyn ConsensusClient>>,
}

impl UpdatableConsensusClient {
    pub fn new() -> Self {
        Self {
            client: ArcSwapOption::empty(),
        }
    }

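    // Returns the current client, polling every RETRY_INTERVAL until one is set.
    // Panics if no client appears within START_TIMEOUT.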
    async fn get(&self) -> Arc<Arc<dyn ConsensusClient>> {
        const START_TIMEOUT: Duration = Duration::from_secs(300);
        const RETRY_INTERVAL: Duration = Duration::from_millis(100);
        if let Ok(client) = timeout(START_TIMEOUT, async {
            loop {
                let Some(client) = self.client.load_full() else {
                    sleep(RETRY_INTERVAL).await;
                    continue;
                };
                return client;
            }
        })
        .await
        {
            return client;
        }

        panic!(
            "Timed out after {:?} waiting for Consensus to start!",
            START_TIMEOUT,
        );
    }

    pub fn set(&self, client: Arc<dyn ConsensusClient>) {
        self.client.store(Some(Arc::new(client)));
    }

    pub fn clear(&self) {
        self.client.store(None);
    }
}

#[async_trait]
impl ConsensusClient for UpdatableConsensusClient {
    async fn submit(
        &self,
        transactions: &[ConsensusTransaction],
        epoch_store: &Arc<AuthorityPerEpochStore>,
    ) -> SuiResult<(Vec<ConsensusPosition>, BlockStatusReceiver)> {
        // Forward to the current inner client, waiting for one to be set if needed.
        let client = self.get().await;
        client.submit(transactions, epoch_store).await
    }
}

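/// Waits for consensus to finish replaying commits at startup.
///
/// A sketch of intended use, with a hypothetical in-crate caller:
/// ```ignore
/// let waiter = consensus_manager.replay_waiter();
/// waiter.wait_for_replay().await;
/// ```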
pub struct ReplayWaiter {
    // Receives the commit consumer monitor of the current epoch when consensus starts.
    consumer_monitor_receiver: broadcast::Receiver<Arc<CommitConsumerMonitor>>,
}

impl ReplayWaiter {
    pub(crate) fn new(
        consumer_monitor_receiver: broadcast::Receiver<Arc<CommitConsumerMonitor>>,
    ) -> Self {
        Self {
            consumer_monitor_receiver,
        }
    }

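    // Waits until a commit consumer monitor is received, then until replay of
    // commits up to the consumer's last processed commit has completed.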
    pub(crate) async fn wait_for_replay(mut self) {
        loop {
            info!("Waiting for consensus to start replaying ...");
            let Ok(monitor) = self.consumer_monitor_receiver.recv().await else {
                continue;
            };
            info!("Waiting for consensus handler to finish replaying ...");
            monitor
                .replay_to_consumer_last_processed_commit_complete()
                .await;
            break;
        }
    }
}

impl Clone for ReplayWaiter {
    fn clone(&self) -> Self {
        Self {
            consumer_monitor_receiver: self.consumer_monitor_receiver.resubscribe(),
        }
    }
}

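/// Gauges recording how long consensus startup and shutdown took, in whole seconds.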
pub struct ConsensusManagerMetrics {
    start_latency: IntGauge,
    shutdown_latency: IntGauge,
}

impl ConsensusManagerMetrics {
    pub fn new(registry: &Registry) -> Self {
        Self {
            start_latency: register_int_gauge_with_registry!(
                "consensus_manager_start_latency",
                "The latency, in seconds, of starting up the consensus node",
                registry,
            )
            .unwrap(),
            shutdown_latency: register_int_gauge_with_registry!(
                "consensus_manager_shutdown_latency",
                "The latency, in seconds, of shutting down the consensus node",
                registry,
            )
            .unwrap(),
        }
    }
}