// sui_bridge/node.rs

1// Copyright (c) Mysten Labs, Inc.
2// SPDX-License-Identifier: Apache-2.0
3
4use crate::action_executor::BridgeActionExecutor;
5use crate::client::bridge_authority_aggregator::BridgeAuthorityAggregator;
6use crate::config::{BridgeClientConfig, BridgeNodeConfig, WatchdogConfig};
7use crate::crypto::BridgeAuthorityPublicKeyBytes;
8use crate::eth_syncer::EthSyncer;
9use crate::events::init_all_struct_tags;
10use crate::metrics::BridgeMetrics;
11use crate::monitor::{self, BridgeMonitor};
12use crate::orchestrator::BridgeOrchestrator;
13use crate::server::handler::BridgeRequestHandler;
14use crate::server::{BridgeNodePublicMetadata, run_server};
15use crate::storage::BridgeOrchestratorTables;
16use crate::sui_bridge_watchdog::eth_bridge_status::EthBridgeStatus;
17use crate::sui_bridge_watchdog::eth_vault_balance::{EthereumVaultBalance, VaultAsset};
18use crate::sui_bridge_watchdog::metrics::WatchdogMetrics;
19use crate::sui_bridge_watchdog::sui_bridge_status::SuiBridgeStatus;
20use crate::sui_bridge_watchdog::total_supplies::TotalSupplies;
21use crate::sui_bridge_watchdog::{BridgeWatchDog, Observable};
22use crate::sui_client::SuiBridgeClient;
23use crate::sui_syncer::SuiSyncer;
24use crate::types::BridgeCommittee;
25use crate::utils::{
26    EthProvider, get_committee_voting_power_by_name, get_eth_contract_addresses,
27    get_validator_names_by_pub_keys,
28};
29use alloy::primitives::Address as EthAddress;
30use arc_swap::ArcSwap;
31use mysten_common::ZipDebugEqIteratorExt;
32use mysten_metrics::spawn_logged_monitored_task;
33use std::collections::{BTreeMap, HashMap};
34use std::net::{IpAddr, Ipv4Addr, SocketAddr};
35use std::sync::Arc;
36use std::time::Duration;
37use sui_types::Identifier;
38use sui_types::bridge::{
39    BRIDGE_COMMITTEE_MODULE_NAME, BRIDGE_LIMITER_MODULE_NAME, BRIDGE_MODULE_NAME,
40    BRIDGE_TREASURY_MODULE_NAME,
41};
42use sui_types::event::EventID;
43use tokio::task::JoinHandle;
44use tracing::info;
45
46pub async fn run_bridge_node(
47    config: BridgeNodeConfig,
48    metadata: BridgeNodePublicMetadata,
49    prometheus_registry: prometheus::Registry,
50) -> anyhow::Result<JoinHandle<()>> {
51    init_all_struct_tags();
52    let metrics = Arc::new(BridgeMetrics::new(&prometheus_registry));
53    let watchdog_config = config.watchdog_config.clone();
54    let (server_config, client_config) = config.validate(metrics.clone()).await?;
55    let sui_chain_identifier = server_config
56        .sui_client
57        .get_chain_identifier()
58        .await
59        .map_err(|e| anyhow::anyhow!("Failed to get sui chain identifier: {:?}", e))?;
60    let eth_chain_identifier = server_config
61        .eth_client
62        .get_chain_id()
63        .await
64        .map_err(|e| anyhow::anyhow!("Failed to get eth chain identifier: {:?}", e))?;
65    prometheus_registry
66        .register(mysten_metrics::bridge_uptime_metric(
67            "bridge",
68            metadata.version,
69            &sui_chain_identifier,
70            &eth_chain_identifier.to_string(),
71            client_config.is_some(),
72        ))
73        .unwrap();
74
75    let committee = Arc::new(
76        server_config
77            .sui_client
78            .get_bridge_committee()
79            .await
80            .expect("Failed to get committee"),
81    );
82    let mut handles = vec![];
83
84    // Start watchdog
85    let eth_provider = server_config.eth_client.provider();
86    let eth_bridge_proxy_address = server_config.eth_bridge_proxy_address;
87    let sui_client = server_config.sui_client.clone();
88    handles.push(spawn_logged_monitored_task!(start_watchdog(
89        watchdog_config,
90        &prometheus_registry,
91        eth_provider,
92        eth_bridge_proxy_address,
93        sui_client
94    )));
95
96    // Update voting right metrics
97    // Before reconfiguration happens we only set it once when the node starts
98    let sui_system = server_config
99        .sui_client
100        .grpc_client()
101        .get_system_state_summary(None)
102        .await?;
103
104    // Start Client
105    if let Some(client_config) = client_config {
106        let committee_keys_to_names =
107            Arc::new(get_validator_names_by_pub_keys(&committee, &sui_system).await);
108        let client_components = start_client_components(
109            client_config,
110            committee.clone(),
111            committee_keys_to_names,
112            metrics.clone(),
113        )
114        .await?;
115        handles.extend(client_components);
116    }
117
118    let committee_name_mapping = get_committee_voting_power_by_name(&committee, &sui_system).await;
119    for (name, voting_power) in committee_name_mapping.into_iter() {
120        metrics
121            .current_bridge_voting_rights
122            .with_label_values(&[name.as_str()])
123            .set(voting_power as i64);
124    }
125
126    // Start Server
127    let socket_address = SocketAddr::new(
128        IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
129        server_config.server_listen_port,
130    );
131    Ok(run_server(
132        &socket_address,
133        BridgeRequestHandler::new(
134            server_config.key,
135            server_config.sui_client,
136            server_config.eth_client,
137            server_config.approved_governance_actions,
138        ),
139        metrics,
140        Arc::new(metadata),
141    ))
142}
143
144async fn start_watchdog(
145    watchdog_config: Option<WatchdogConfig>,
146    registry: &prometheus::Registry,
147    eth_provider: EthProvider,
148    eth_bridge_proxy_address: EthAddress,
149    sui_client: Arc<SuiBridgeClient>,
150) {
151    let watchdog_metrics = WatchdogMetrics::new(registry);
152    let (
153        _committee_address,
154        _limiter_address,
155        vault_address,
156        _config_address,
157        weth_address,
158        usdt_address,
159        wbtc_address,
160        lbtc_address,
161    ) = get_eth_contract_addresses(eth_bridge_proxy_address, eth_provider.clone())
162        .await
163        .unwrap_or_else(|e| panic!("get_eth_contract_addresses should not fail: {}", e));
164
165    // If vault_address is zero (can happen due to storage layout mismatch during upgrades),
166    // skip vault balance monitoring but allow node to start for signing server functionality.
167    let vault_monitoring_enabled = !vault_address.is_zero() && !weth_address.is_zero();
168    if !vault_monitoring_enabled {
169        tracing::warn!(
170            "Vault address or token addresses are zero - skipping vault balance monitoring. \
171            This is expected during storage layout mismatch recovery."
172        );
173    }
174
175    let eth_bridge_status = EthBridgeStatus::new(
176        eth_provider.clone(),
177        eth_bridge_proxy_address,
178        watchdog_metrics.eth_bridge_paused.clone(),
179    );
180
181    let sui_bridge_status = SuiBridgeStatus::new(
182        sui_client.clone(),
183        watchdog_metrics.sui_bridge_paused.clone(),
184    );
185
186    let mut observables: Vec<Box<dyn Observable + Send + Sync>> =
187        vec![Box::new(eth_bridge_status), Box::new(sui_bridge_status)];
188
189    // Add vault balance monitors only when addresses are valid
190    if vault_monitoring_enabled {
191        let eth_vault_balance = EthereumVaultBalance::new(
192            eth_provider.clone(),
193            vault_address,
194            weth_address,
195            VaultAsset::WETH,
196            watchdog_metrics.eth_vault_balance.clone(),
197        )
198        .await
199        .unwrap_or_else(|e| panic!("Failed to create eth vault balance: {}", e));
200
201        let usdt_vault_balance = EthereumVaultBalance::new(
202            eth_provider.clone(),
203            vault_address,
204            usdt_address,
205            VaultAsset::USDT,
206            watchdog_metrics.usdt_vault_balance.clone(),
207        )
208        .await
209        .unwrap_or_else(|e| panic!("Failed to create usdt vault balance: {}", e));
210
211        let wbtc_vault_balance = EthereumVaultBalance::new(
212            eth_provider.clone(),
213            vault_address,
214            wbtc_address,
215            VaultAsset::WBTC,
216            watchdog_metrics.wbtc_vault_balance.clone(),
217        )
218        .await
219        .unwrap_or_else(|e| panic!("Failed to create wbtc vault balance: {}", e));
220
221        observables.push(Box::new(eth_vault_balance));
222        observables.push(Box::new(usdt_vault_balance));
223        observables.push(Box::new(wbtc_vault_balance));
224
225        if !lbtc_address.is_zero() {
226            let lbtc_vault_balance = EthereumVaultBalance::new(
227                eth_provider,
228                vault_address,
229                lbtc_address,
230                VaultAsset::LBTC,
231                watchdog_metrics.lbtc_vault_balance.clone(),
232            )
233            .await
234            .unwrap_or_else(|e| panic!("Failed to create lbtc vault balance: {}", e));
235            observables.push(Box::new(lbtc_vault_balance));
236        }
237    }
238
239    if let Some(watchdog_config) = watchdog_config
240        && !watchdog_config.total_supplies.is_empty()
241    {
242        let total_supplies = TotalSupplies::new(
243            sui_client.grpc_client().clone().into_inner(),
244            watchdog_config.total_supplies,
245            watchdog_metrics.total_supplies.clone(),
246        );
247        observables.push(Box::new(total_supplies));
248    }
249
250    BridgeWatchDog::new(observables).run().await
251}
252
253// TODO: is there a way to clean up the overrides after it's stored in DB?
254async fn start_client_components(
255    client_config: BridgeClientConfig,
256    committee: Arc<BridgeCommittee>,
257    committee_keys_to_names: Arc<BTreeMap<BridgeAuthorityPublicKeyBytes, String>>,
258    metrics: Arc<BridgeMetrics>,
259) -> anyhow::Result<Vec<JoinHandle<()>>> {
260    let store: std::sync::Arc<BridgeOrchestratorTables> =
261        BridgeOrchestratorTables::new(&client_config.db_path.join("client"));
262    let sui_modules_to_watch = get_sui_modules_to_watch(
263        &store,
264        client_config.sui_bridge_module_last_processed_event_id_override,
265    );
266
267    let eth_contracts_to_watch = get_eth_contracts_to_watch(
268        &store,
269        &client_config.eth_contracts,
270        client_config.eth_contracts_start_block_fallback,
271        client_config.eth_contracts_start_block_override,
272    );
273
274    let sui_client = client_config.sui_client.clone();
275
276    let last_processed_bridge_event_id = sui_modules_to_watch
277        .get(&BRIDGE_MODULE_NAME.to_owned())
278        .and_then(|opt| *opt);
279
280    let next_sequence_number = get_next_sequence_number(
281        &store,
282        &sui_client,
283        last_processed_bridge_event_id,
284        client_config.sui_bridge_next_sequence_number_override,
285    )
286    .await;
287
288    let mut all_handles = vec![];
289    let (task_handles, eth_events_rx, _) =
290        EthSyncer::new(client_config.eth_client.clone(), eth_contracts_to_watch)
291            .run(metrics.clone())
292            .await
293            .expect("Failed to start eth syncer");
294    all_handles.extend(task_handles);
295
296    let (task_handles, sui_grpc_events_rx) = SuiSyncer::new(
297        client_config.sui_client,
298        sui_modules_to_watch,
299        metrics.clone(),
300    )
301    .run_grpc(
302        client_config.sui_bridge_chain_id,
303        next_sequence_number,
304        Duration::from_secs(2),
305        10,
306    )
307    .await
308    .expect("Failed to start sui syncer");
309    all_handles.extend(task_handles);
310
311    let bridge_auth_agg = Arc::new(ArcSwap::from(Arc::new(BridgeAuthorityAggregator::new(
312        committee,
313        metrics.clone(),
314        committee_keys_to_names,
315    ))));
316    // TODO: should we use one query instead of two?
317    let sui_token_type_tags = sui_client.get_token_id_map().await.unwrap();
318    let is_bridge_paused = sui_client.is_bridge_paused().await.unwrap();
319
320    let (bridge_pause_tx, bridge_pause_rx) = tokio::sync::watch::channel(is_bridge_paused);
321
322    let (eth_monitor_tx, eth_monitor_rx) = mysten_metrics::metered_channel::channel(
323        10000,
324        &mysten_metrics::get_metrics()
325            .unwrap()
326            .channel_inflight
327            .with_label_values(&["eth_monitor_queue"]),
328    );
329
330    let sui_token_type_tags = Arc::new(ArcSwap::from(Arc::new(sui_token_type_tags)));
331    let bridge_action_executor = BridgeActionExecutor::new(
332        sui_client.clone(),
333        bridge_auth_agg.clone(),
334        store.clone(),
335        client_config.key,
336        client_config.sui_address,
337        client_config.gas_object_ref.0,
338        sui_token_type_tags.clone(),
339        bridge_pause_rx,
340        metrics.clone(),
341    )
342    .await;
343
344    let (sui_monitor_tx, sui_monitor_rx) = mysten_metrics::metered_channel::channel(
345        10000,
346        &mysten_metrics::get_metrics()
347            .unwrap()
348            .channel_inflight
349            .with_label_values(&["sui_monitor_queue"]),
350    );
351    tokio::spawn(monitor::subscribe_bridge_events(
352        sui_client.grpc_client().clone().into_inner(),
353        sui_monitor_tx,
354    ));
355    let monitor = BridgeMonitor::new(
356        sui_client.clone(),
357        sui_monitor_rx,
358        eth_monitor_rx,
359        bridge_auth_agg.clone(),
360        bridge_pause_tx,
361        sui_token_type_tags,
362        metrics.clone(),
363    );
364    all_handles.push(spawn_logged_monitored_task!(monitor.run()));
365
366    let orchestrator = BridgeOrchestrator::new(
367        sui_client,
368        sui_grpc_events_rx,
369        eth_events_rx,
370        store.clone(),
371        eth_monitor_tx,
372        metrics,
373    );
374
375    all_handles.extend(orchestrator.run_with_grpc(bridge_action_executor).await);
376    Ok(all_handles)
377}
378
379async fn get_next_sequence_number<C: crate::sui_client::SuiClientInner>(
380    store: &BridgeOrchestratorTables,
381    sui_client: &crate::sui_client::SuiClient<C>,
382    last_processed_bridge_event_id: Option<EventID>,
383    next_sequence_number_override: Option<u64>,
384) -> u64 {
385    if let Some(next_sequence_number_override) = next_sequence_number_override {
386        info!("Overriding next sequence number to {next_sequence_number_override}",);
387        return next_sequence_number_override;
388    }
389
390    if let Ok(Some(sequence_number)) = store.get_sui_sequence_number_cursor() {
391        info!("Using sequence number {sequence_number} from storage",);
392        return sequence_number;
393    }
394
395    if let Some(event_id) = last_processed_bridge_event_id {
396        match sui_client.get_sequence_number_from_event_id(event_id).await {
397            Ok(Some(sequence_number)) => {
398                let next = sequence_number + 1;
399                info!(
400                    ?event_id,
401                    last_processed_seq = sequence_number,
402                    next_seq_to_read = next,
403                    "Migrated from legacy event cursor to sequence number cursor"
404                );
405                return next;
406            }
407            Ok(None) => {
408                info!(
409                    ?event_id,
410                    "Could not extract sequence number from legacy event cursor, starting from 0"
411                );
412            }
413            Err(e) => {
414                info!(
415                    ?event_id,
416                    ?e,
417                    "Failed to get sequence number from legacy event cursor, starting from 0"
418                );
419            }
420        }
421    }
422
423    info!("No cursor found for gRPC syncer, starting from sequence number 0");
424    0
425}
426
427fn get_sui_modules_to_watch(
428    store: &std::sync::Arc<BridgeOrchestratorTables>,
429    sui_bridge_module_last_processed_event_id_override: Option<EventID>,
430) -> HashMap<Identifier, Option<EventID>> {
431    let sui_bridge_modules = vec![
432        BRIDGE_MODULE_NAME.to_owned(),
433        BRIDGE_COMMITTEE_MODULE_NAME.to_owned(),
434        BRIDGE_TREASURY_MODULE_NAME.to_owned(),
435        BRIDGE_LIMITER_MODULE_NAME.to_owned(),
436    ];
437    if let Some(cursor) = sui_bridge_module_last_processed_event_id_override {
438        info!("Overriding cursor for sui bridge modules to {:?}", cursor);
439        return HashMap::from_iter(
440            sui_bridge_modules
441                .iter()
442                .map(|module| (module.clone(), Some(cursor))),
443        );
444    }
445
446    let sui_bridge_module_stored_cursor = store
447        .get_sui_event_cursors(&sui_bridge_modules)
448        .expect("Failed to get eth sui event cursors from storage");
449    let mut sui_modules_to_watch = HashMap::new();
450    for (module_identifier, cursor) in sui_bridge_modules
451        .iter()
452        .zip_debug_eq(sui_bridge_module_stored_cursor)
453    {
454        if cursor.is_none() {
455            info!(
456                "No cursor found for sui bridge module {} in storage or config override, query start from the beginning.",
457                module_identifier
458            );
459        }
460        sui_modules_to_watch.insert(module_identifier.clone(), cursor);
461    }
462    sui_modules_to_watch
463}
464
465fn get_eth_contracts_to_watch(
466    store: &std::sync::Arc<BridgeOrchestratorTables>,
467    eth_contracts: &[EthAddress],
468    eth_contracts_start_block_fallback: u64,
469    eth_contracts_start_block_override: Option<u64>,
470) -> HashMap<EthAddress, u64> {
471    let stored_eth_cursors = store
472        .get_eth_event_cursors(eth_contracts)
473        .expect("Failed to get eth event cursors from storage");
474    let mut eth_contracts_to_watch = HashMap::new();
475    for (contract, stored_cursor) in eth_contracts.iter().zip_debug_eq(stored_eth_cursors) {
476        // start block precedence:
477        // eth_contracts_start_block_override > stored cursor > eth_contracts_start_block_fallback
478        match (eth_contracts_start_block_override, stored_cursor) {
479            (Some(override_), _) => {
480                eth_contracts_to_watch.insert(*contract, override_);
481                info!(
482                    "Overriding cursor for eth bridge contract {} to {}. Stored cursor: {:?}",
483                    contract, override_, stored_cursor
484                );
485            }
486            (None, Some(stored_cursor)) => {
487                // +1: The stored value is the last block that was processed, so we start from the next block.
488                eth_contracts_to_watch.insert(*contract, stored_cursor + 1);
489            }
490            (None, None) => {
491                // If no cursor is found, start from the fallback block.
492                eth_contracts_to_watch.insert(*contract, eth_contracts_start_block_fallback);
493            }
494        }
495    }
496    eth_contracts_to_watch
497}
498
#[cfg(test)]
mod tests {
    use alloy::primitives::Address as EthAddress;
    use alloy::primitives::U160;
    use prometheus::Registry;

    use super::*;
    use crate::config::BridgeNodeConfig;
    use crate::config::EthConfig;
    use crate::config::SuiConfig;
    use crate::config::default_ed25519_key_pair;
    use crate::e2e_tests::test_utils::BridgeTestCluster;
    use crate::e2e_tests::test_utils::BridgeTestClusterBuilder;
    use crate::utils::wait_for_server_to_be_up;
    use fastcrypto::secp256k1::Secp256k1KeyPair;
    use sui_config::local_ip_utils::get_available_port;
    use sui_types::base_types::SuiAddress;
    use sui_types::bridge::BridgeChainId;
    use sui_types::crypto::EncodeDecodeBase64;
    use sui_types::crypto::KeypairTraits;
    use sui_types::crypto::SuiKeyPair;
    use sui_types::crypto::get_key_pair;
    use sui_types::digests::TransactionDigest;
    use sui_types::event::EventID;
    use tempfile::tempdir;

    /// Builds the expected contract -> start-block map from literal pairs.
    fn expected_map(pairs: [(EthAddress, u64); 2]) -> HashMap<EthAddress, u64> {
        pairs.into_iter().collect()
    }

    #[tokio::test]
    async fn test_get_eth_contracts_to_watch() {
        telemetry_subscribers::init_for_testing();
        let temp_dir = tempfile::tempdir().unwrap();
        let eth_contracts = vec![
            EthAddress::from(U160::from(1)),
            EthAddress::from(U160::from(2)),
        ];
        let store = BridgeOrchestratorTables::new(temp_dir.path());

        // Neither an override nor stored watermarks: use the fallback.
        let contracts = get_eth_contracts_to_watch(&store, &eth_contracts, 10, None);
        assert_eq!(
            contracts,
            expected_map([(eth_contracts[0], 10), (eth_contracts[1], 10)])
        );

        // No stored watermarks: the override wins.
        let contracts = get_eth_contracts_to_watch(&store, &eth_contracts, 10, Some(420));
        assert_eq!(
            contracts,
            expected_map([(eth_contracts[0], 420), (eth_contracts[1], 420)])
        );

        store
            .update_eth_event_cursor(eth_contracts[0], 100)
            .unwrap();
        store
            .update_eth_event_cursor(eth_contracts[1], 102)
            .unwrap();

        // Stored watermarks and no override: resume at watermark + 1.
        let contracts = get_eth_contracts_to_watch(&store, &eth_contracts, 10, None);
        assert_eq!(
            contracts,
            expected_map([(eth_contracts[0], 101), (eth_contracts[1], 103)])
        );

        // The override also beats stored watermarks.
        let contracts = get_eth_contracts_to_watch(&store, &eth_contracts, 10, Some(200));
        assert_eq!(
            contracts,
            expected_map([(eth_contracts[0], 200), (eth_contracts[1], 200)])
        );
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 8)]
    async fn test_starting_bridge_node() {
        telemetry_subscribers::init_for_testing();
        let bridge_test_cluster = setup().await;
        let kp = bridge_test_cluster.bridge_authority_key(0);

        // Server-only run: write the authority key to disk so the node can load it.
        let tmp_dir = tempdir().unwrap().keep();
        let authority_key_path = "test_starting_bridge_node_bridge_authority_key";
        let server_listen_port = get_available_port("127.0.0.1");
        std::fs::write(tmp_dir.join(authority_key_path), kp.encode_base64()).unwrap();

        let config = BridgeNodeConfig {
            server_listen_port,
            metrics_port: get_available_port("127.0.0.1"),
            bridge_authority_key_path: tmp_dir.join(authority_key_path),
            approved_governance_actions: vec![],
            run_client: false,
            db_path: None,
            metrics_key_pair: default_ed25519_key_pair(),
            metrics: None,
            watchdog_config: None,
            sui: SuiConfig {
                sui_rpc_url: bridge_test_cluster.sui_rpc_url(),
                sui_bridge_chain_id: BridgeChainId::SuiCustom as u8,
                bridge_client_key_path: None,
                bridge_client_gas_object: None,
                sui_bridge_module_last_processed_event_id_override: None,
                sui_bridge_next_sequence_number_override: None,
            },
            eth: EthConfig {
                eth_rpc_url: None,
                eth_rpc_urls: Some(vec![bridge_test_cluster.eth_rpc_url()]),
                eth_rpc_quorum: 1,
                eth_health_check_interval_secs: 300,
                eth_bridge_proxy_address: bridge_test_cluster.sui_bridge_address(),
                eth_bridge_chain_id: BridgeChainId::EthCustom as u8,
                eth_contracts_start_block_fallback: None,
                eth_contracts_start_block_override: None,
            },
        };

        // Run the node in-process, then expect the server to come up.
        let _handle = run_bridge_node(
            config,
            BridgeNodePublicMetadata::empty_for_testing(),
            Registry::new(),
        )
        .await
        .unwrap();

        let server_url = format!("http://127.0.0.1:{}", server_listen_port);
        wait_for_server_to_be_up(server_url, 5).await.unwrap();
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 8)]
    async fn test_starting_bridge_node_with_client() {
        telemetry_subscribers::init_for_testing();
        let bridge_test_cluster = setup().await;
        let kp = bridge_test_cluster.bridge_authority_key(0);

        // Server + client run; the authority key doubles as the client key.
        let tmp_dir = tempdir().unwrap().keep();
        let db_path = tmp_dir.join("test_starting_bridge_node_with_client_db");
        let authority_key_path = "test_starting_bridge_node_with_client_bridge_authority_key";
        let server_listen_port = get_available_port("127.0.0.1");
        std::fs::write(tmp_dir.join(authority_key_path), kp.encode_base64()).unwrap();

        // Fund the client address so it can submit transactions.
        let client_sui_address = SuiAddress::from(kp.public());
        let sender_address = bridge_test_cluster.sui_user_address();
        bridge_test_cluster
            .test_cluster
            .inner
            .transfer_sui_must_exceed(sender_address, client_sui_address, 1000000000)
            .await;

        let config = BridgeNodeConfig {
            server_listen_port,
            metrics_port: get_available_port("127.0.0.1"),
            bridge_authority_key_path: tmp_dir.join(authority_key_path),
            approved_governance_actions: vec![],
            run_client: true,
            db_path: Some(db_path),
            metrics_key_pair: default_ed25519_key_pair(),
            metrics: None,
            watchdog_config: None,
            sui: SuiConfig {
                sui_rpc_url: bridge_test_cluster.sui_rpc_url(),
                sui_bridge_chain_id: BridgeChainId::SuiCustom as u8,
                bridge_client_key_path: None,
                bridge_client_gas_object: None,
                sui_bridge_module_last_processed_event_id_override: Some(EventID {
                    tx_digest: TransactionDigest::random(),
                    event_seq: 0,
                }),
                sui_bridge_next_sequence_number_override: None,
            },
            eth: EthConfig {
                eth_rpc_url: None,
                eth_rpc_urls: Some(vec![bridge_test_cluster.eth_rpc_url()]),
                eth_rpc_quorum: 1,
                eth_health_check_interval_secs: 300,
                eth_bridge_proxy_address: bridge_test_cluster.sui_bridge_address(),
                eth_bridge_chain_id: BridgeChainId::EthCustom as u8,
                eth_contracts_start_block_fallback: Some(0),
                eth_contracts_start_block_override: None,
            },
        };

        // Run the node in-process.
        let _handle = run_bridge_node(
            config,
            BridgeNodePublicMetadata::empty_for_testing(),
            Registry::new(),
        )
        .await
        .unwrap();

        // Client components are spawned before the server, so a responsive
        // server implies the client components are running too.
        let server_url = format!("http://127.0.0.1:{}", server_listen_port);
        wait_for_server_to_be_up(server_url, 5).await.unwrap();
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 8)]
    async fn test_starting_bridge_node_with_client_and_separate_client_key() {
        telemetry_subscribers::init_for_testing();
        let bridge_test_cluster = setup().await;
        let kp = bridge_test_cluster.bridge_authority_key(0);

        // Server + client run with a client key distinct from the authority key.
        let tmp_dir = tempdir().unwrap().keep();
        let db_path =
            tmp_dir.join("test_starting_bridge_node_with_client_and_separate_client_key_db");
        let authority_key_path =
            "test_starting_bridge_node_with_client_and_separate_client_key_bridge_authority_key";
        let server_listen_port = get_available_port("127.0.0.1");

        // Persist the bridge authority key.
        std::fs::write(tmp_dir.join(authority_key_path), kp.encode_base64()).unwrap();

        // Generate and persist a fresh secp256k1 client key.
        let (_, kp): (_, Secp256k1KeyPair) = get_key_pair();
        let kp = SuiKeyPair::from(kp);
        let client_key_path =
            "test_starting_bridge_node_with_client_and_separate_client_key_bridge_client_key";
        std::fs::write(tmp_dir.join(client_key_path), kp.encode_base64()).unwrap();

        // Fund the client address; keep the gas object for the config below.
        let client_sui_address = SuiAddress::from(&kp.public());
        let sender_address = bridge_test_cluster.sui_user_address();
        let gas_obj = bridge_test_cluster
            .test_cluster
            .inner
            .transfer_sui_must_exceed(sender_address, client_sui_address, 1000000000)
            .await;

        let config = BridgeNodeConfig {
            server_listen_port,
            metrics_port: get_available_port("127.0.0.1"),
            bridge_authority_key_path: tmp_dir.join(authority_key_path),
            approved_governance_actions: vec![],
            run_client: true,
            db_path: Some(db_path),
            metrics_key_pair: default_ed25519_key_pair(),
            metrics: None,
            watchdog_config: None,
            sui: SuiConfig {
                sui_rpc_url: bridge_test_cluster.sui_rpc_url(),
                sui_bridge_chain_id: BridgeChainId::SuiCustom as u8,
                bridge_client_key_path: Some(tmp_dir.join(client_key_path)),
                bridge_client_gas_object: Some(gas_obj),
                sui_bridge_module_last_processed_event_id_override: Some(EventID {
                    tx_digest: TransactionDigest::random(),
                    event_seq: 0,
                }),
                sui_bridge_next_sequence_number_override: None,
            },
            eth: EthConfig {
                eth_rpc_url: None,
                eth_rpc_urls: Some(vec![bridge_test_cluster.eth_rpc_url()]),
                eth_rpc_quorum: 1,
                eth_health_check_interval_secs: 300,
                eth_bridge_proxy_address: bridge_test_cluster.sui_bridge_address(),
                eth_bridge_chain_id: BridgeChainId::EthCustom as u8,
                eth_contracts_start_block_fallback: Some(0),
                eth_contracts_start_block_override: Some(0),
            },
        };

        // Run the node in-process.
        let _handle = run_bridge_node(
            config,
            BridgeNodePublicMetadata::empty_for_testing(),
            Registry::new(),
        )
        .await
        .unwrap();

        // Client components are spawned before the server, so a responsive
        // server implies the client components are running too.
        let server_url = format!("http://127.0.0.1:{}", server_listen_port);
        wait_for_server_to_be_up(server_url, 5).await.unwrap();
    }

    /// Two-validator test cluster with the Eth environment but without a
    /// separate bridge cluster (the node under test provides it).
    async fn setup() -> BridgeTestCluster {
        BridgeTestClusterBuilder::new()
            .with_eth_env(true)
            .with_bridge_cluster(false)
            .with_num_validators(2)
            .build()
            .await
    }
}