sui_bridge/node.rs

// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

use crate::action_executor::BridgeActionExecutor;
use crate::client::bridge_authority_aggregator::BridgeAuthorityAggregator;
use crate::config::{BridgeClientConfig, BridgeNodeConfig, WatchdogConfig};
use crate::crypto::BridgeAuthorityPublicKeyBytes;
use crate::eth_syncer::EthSyncer;
use crate::events::init_all_struct_tags;
use crate::metrics::BridgeMetrics;
use crate::monitor::{self, BridgeMonitor};
use crate::orchestrator::BridgeOrchestrator;
use crate::server::handler::BridgeRequestHandler;
use crate::server::{BridgeNodePublicMetadata, run_server};
use crate::storage::BridgeOrchestratorTables;
use crate::sui_bridge_watchdog::eth_bridge_status::EthBridgeStatus;
use crate::sui_bridge_watchdog::eth_vault_balance::{EthereumVaultBalance, VaultAsset};
use crate::sui_bridge_watchdog::metrics::WatchdogMetrics;
use crate::sui_bridge_watchdog::sui_bridge_status::SuiBridgeStatus;
use crate::sui_bridge_watchdog::total_supplies::TotalSupplies;
use crate::sui_bridge_watchdog::{BridgeWatchDog, Observable};
use crate::sui_client::SuiBridgeClient;
use crate::sui_syncer::SuiSyncer;
use crate::types::BridgeCommittee;
use crate::utils::{
    EthProvider, get_committee_voting_power_by_name, get_eth_contract_addresses,
    get_validator_names_by_pub_keys,
};
use alloy::primitives::Address as EthAddress;
use arc_swap::ArcSwap;
use mysten_metrics::spawn_logged_monitored_task;
use std::collections::{BTreeMap, HashMap};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use sui_types::Identifier;
use sui_types::bridge::{
    BRIDGE_COMMITTEE_MODULE_NAME, BRIDGE_LIMITER_MODULE_NAME, BRIDGE_MODULE_NAME,
    BRIDGE_TREASURY_MODULE_NAME,
};
use sui_types::event::EventID;
use tokio::task::JoinHandle;
use tracing::info;

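/// Entry point for the bridge node. Validates the config, registers uptime and
/// voting-power metrics, starts the watchdog and (if configured) the client
/// components, and finally starts the bridge server, returning its join handle.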
pub async fn run_bridge_node(
    config: BridgeNodeConfig,
    metadata: BridgeNodePublicMetadata,
    prometheus_registry: prometheus::Registry,
) -> anyhow::Result<JoinHandle<()>> {
    init_all_struct_tags();
    let metrics = Arc::new(BridgeMetrics::new(&prometheus_registry));
    let watchdog_config = config.watchdog_config.clone();
    let (server_config, client_config) = config.validate(metrics.clone()).await?;
    let sui_chain_identifier = server_config
        .sui_client
        .get_chain_identifier()
        .await
        .map_err(|e| anyhow::anyhow!("Failed to get sui chain identifier: {:?}", e))?;
    let eth_chain_identifier = server_config
        .eth_client
        .get_chain_id()
        .await
        .map_err(|e| anyhow::anyhow!("Failed to get eth chain identifier: {:?}", e))?;
    prometheus_registry
        .register(mysten_metrics::bridge_uptime_metric(
            "bridge",
            metadata.version,
            &sui_chain_identifier,
            &eth_chain_identifier.to_string(),
            client_config.is_some(),
        ))
        .unwrap();

    let committee = Arc::new(
        server_config
            .sui_client
            .get_bridge_committee()
            .await
            .expect("Failed to get committee"),
    );
    let mut handles = vec![];

    // Start watchdog
    let eth_provider = server_config.eth_client.provider();
    let eth_bridge_proxy_address = server_config.eth_bridge_proxy_address;
    let sui_client = server_config.sui_client.clone();
    handles.push(spawn_logged_monitored_task!(start_watchdog(
        watchdog_config,
        &prometheus_registry,
        eth_provider,
        eth_bridge_proxy_address,
        sui_client
    )));

    // Update voting-rights metrics.
    // Until reconfiguration is handled, these are set only once, when the node starts.
    let sui_system = server_config
        .sui_client
        .grpc_client()
        .get_system_state_summary(None)
        .await?;

    // Start Client
    if let Some(client_config) = client_config {
        let committee_keys_to_names =
            Arc::new(get_validator_names_by_pub_keys(&committee, &sui_system).await);
        let client_components = start_client_components(
            client_config,
            committee.clone(),
            committee_keys_to_names,
            metrics.clone(),
        )
        .await?;
        handles.extend(client_components);
    }

    let committee_name_mapping = get_committee_voting_power_by_name(&committee, &sui_system).await;
    for (name, voting_power) in committee_name_mapping.into_iter() {
        metrics
            .current_bridge_voting_rights
            .with_label_values(&[name.as_str()])
            .set(voting_power as i64);
    }

    // Start Server
    let socket_address = SocketAddr::new(
        IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
        server_config.server_listen_port,
    );
    Ok(run_server(
        &socket_address,
        BridgeRequestHandler::new(
            server_config.key,
            server_config.sui_client,
            server_config.eth_client,
            server_config.approved_governance_actions,
        ),
        metrics,
        Arc::new(metadata),
    ))
}

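/// Runs the bridge watchdog: observes bridge pause status on both chains,
/// Ethereum vault balances (when the vault and token addresses are non-zero),
/// and, if configured, total token supplies on Sui.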
async fn start_watchdog(
    watchdog_config: Option<WatchdogConfig>,
    registry: &prometheus::Registry,
    eth_provider: EthProvider,
    eth_bridge_proxy_address: EthAddress,
    sui_client: Arc<SuiBridgeClient>,
) {
    let watchdog_metrics = WatchdogMetrics::new(registry);
    let (
        _committee_address,
        _limiter_address,
        vault_address,
        _config_address,
        weth_address,
        usdt_address,
        wbtc_address,
        lbtc_address,
    ) = get_eth_contract_addresses(eth_bridge_proxy_address, eth_provider.clone())
        .await
        .unwrap_or_else(|e| panic!("get_eth_contract_addresses should not fail: {}", e));

    // If vault_address is zero (can happen due to storage layout mismatch during upgrades),
    // skip vault balance monitoring but allow node to start for signing server functionality.
    let vault_monitoring_enabled = !vault_address.is_zero() && !weth_address.is_zero();
    if !vault_monitoring_enabled {
        tracing::warn!(
            "Vault address or token addresses are zero - skipping vault balance monitoring. \
            This is expected during storage layout mismatch recovery."
        );
    }

    let eth_bridge_status = EthBridgeStatus::new(
        eth_provider.clone(),
        eth_bridge_proxy_address,
        watchdog_metrics.eth_bridge_paused.clone(),
    );

    let sui_bridge_status = SuiBridgeStatus::new(
        sui_client.clone(),
        watchdog_metrics.sui_bridge_paused.clone(),
    );

    let mut observables: Vec<Box<dyn Observable + Send + Sync>> =
        vec![Box::new(eth_bridge_status), Box::new(sui_bridge_status)];

    // Add vault balance monitors only when addresses are valid
    if vault_monitoring_enabled {
        let eth_vault_balance = EthereumVaultBalance::new(
            eth_provider.clone(),
            vault_address,
            weth_address,
            VaultAsset::WETH,
            watchdog_metrics.eth_vault_balance.clone(),
        )
        .await
        .unwrap_or_else(|e| panic!("Failed to create eth vault balance: {}", e));

        let usdt_vault_balance = EthereumVaultBalance::new(
            eth_provider.clone(),
            vault_address,
            usdt_address,
            VaultAsset::USDT,
            watchdog_metrics.usdt_vault_balance.clone(),
        )
        .await
        .unwrap_or_else(|e| panic!("Failed to create usdt vault balance: {}", e));

        let wbtc_vault_balance = EthereumVaultBalance::new(
            eth_provider.clone(),
            vault_address,
            wbtc_address,
            VaultAsset::WBTC,
            watchdog_metrics.wbtc_vault_balance.clone(),
        )
        .await
        .unwrap_or_else(|e| panic!("Failed to create wbtc vault balance: {}", e));

        observables.push(Box::new(eth_vault_balance));
        observables.push(Box::new(usdt_vault_balance));
        observables.push(Box::new(wbtc_vault_balance));

        if !lbtc_address.is_zero() {
            let lbtc_vault_balance = EthereumVaultBalance::new(
                eth_provider,
                vault_address,
                lbtc_address,
                VaultAsset::LBTC,
                watchdog_metrics.lbtc_vault_balance.clone(),
            )
            .await
            .unwrap_or_else(|e| panic!("Failed to create lbtc vault balance: {}", e));
            observables.push(Box::new(lbtc_vault_balance));
        }
    }

    if let Some(watchdog_config) = watchdog_config
        && !watchdog_config.total_supplies.is_empty()
    {
        let total_supplies = TotalSupplies::new(
            sui_client.grpc_client().clone().into_inner(),
            watchdog_config.total_supplies,
            watchdog_metrics.total_supplies.clone(),
        );
        observables.push(Box::new(total_supplies));
    }

    BridgeWatchDog::new(observables).run().await
}

// TODO: is there a way to clean up the overrides after it's stored in DB?
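/// Starts the client-side components: the Eth and Sui syncers, the bridge
/// action executor, the bridge monitor, and the orchestrator. Returns the join
/// handles of all spawned tasks.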
async fn start_client_components(
    client_config: BridgeClientConfig,
    committee: Arc<BridgeCommittee>,
    committee_keys_to_names: Arc<BTreeMap<BridgeAuthorityPublicKeyBytes, String>>,
    metrics: Arc<BridgeMetrics>,
) -> anyhow::Result<Vec<JoinHandle<()>>> {
    let store: std::sync::Arc<BridgeOrchestratorTables> =
        BridgeOrchestratorTables::new(&client_config.db_path.join("client"));
    let sui_modules_to_watch = get_sui_modules_to_watch(
        &store,
        client_config.sui_bridge_module_last_processed_event_id_override,
    );

    let eth_contracts_to_watch = get_eth_contracts_to_watch(
        &store,
        &client_config.eth_contracts,
        client_config.eth_contracts_start_block_fallback,
        client_config.eth_contracts_start_block_override,
    );

    let sui_client = client_config.sui_client.clone();

    let last_processed_bridge_event_id = sui_modules_to_watch
        .get(&BRIDGE_MODULE_NAME.to_owned())
        .and_then(|opt| *opt);

    let next_sequence_number = get_next_sequence_number(
        &store,
        &sui_client,
        last_processed_bridge_event_id,
        client_config.sui_bridge_next_sequence_number_override,
    )
    .await;

    let mut all_handles = vec![];
    let (task_handles, eth_events_rx, _) =
        EthSyncer::new(client_config.eth_client.clone(), eth_contracts_to_watch)
            .run(metrics.clone())
            .await
            .expect("Failed to start eth syncer");
    all_handles.extend(task_handles);

    let (task_handles, sui_grpc_events_rx) = SuiSyncer::new(
        client_config.sui_client,
        sui_modules_to_watch,
        metrics.clone(),
    )
    .run_grpc(
        client_config.sui_bridge_chain_id,
        next_sequence_number,
        Duration::from_secs(2),
        10,
    )
    .await
    .expect("Failed to start sui syncer");
    all_handles.extend(task_handles);

    let bridge_auth_agg = Arc::new(ArcSwap::from(Arc::new(BridgeAuthorityAggregator::new(
        committee,
        metrics.clone(),
        committee_keys_to_names,
    ))));
    // TODO: should we use one query instead of two?
    let sui_token_type_tags = sui_client.get_token_id_map().await.unwrap();
    let is_bridge_paused = sui_client.is_bridge_paused().await.unwrap();

    let (bridge_pause_tx, bridge_pause_rx) = tokio::sync::watch::channel(is_bridge_paused);

    let (eth_monitor_tx, eth_monitor_rx) = mysten_metrics::metered_channel::channel(
        10000,
        &mysten_metrics::get_metrics()
            .unwrap()
            .channel_inflight
            .with_label_values(&["eth_monitor_queue"]),
    );

    let sui_token_type_tags = Arc::new(ArcSwap::from(Arc::new(sui_token_type_tags)));
    let bridge_action_executor = BridgeActionExecutor::new(
        sui_client.clone(),
        bridge_auth_agg.clone(),
        store.clone(),
        client_config.key,
        client_config.sui_address,
        client_config.gas_object_ref.0,
        sui_token_type_tags.clone(),
        bridge_pause_rx,
        metrics.clone(),
    )
    .await;

    let (sui_monitor_tx, sui_monitor_rx) = mysten_metrics::metered_channel::channel(
        10000,
        &mysten_metrics::get_metrics()
            .unwrap()
            .channel_inflight
            .with_label_values(&["sui_monitor_queue"]),
    );
    tokio::spawn(monitor::subscribe_bridge_events(
        sui_client.grpc_client().clone().into_inner(),
        sui_monitor_tx,
    ));
    let monitor = BridgeMonitor::new(
        sui_client.clone(),
        sui_monitor_rx,
        eth_monitor_rx,
        bridge_auth_agg.clone(),
        bridge_pause_tx,
        sui_token_type_tags,
        metrics.clone(),
    );
    all_handles.push(spawn_logged_monitored_task!(monitor.run()));

    let orchestrator = BridgeOrchestrator::new(
        sui_client,
        sui_grpc_events_rx,
        eth_events_rx,
        store.clone(),
        eth_monitor_tx,
        metrics,
    );

    all_handles.extend(orchestrator.run_with_grpc(bridge_action_executor).await);
    Ok(all_handles)
}

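/// Resolves the next sequence number for the gRPC Sui syncer. Precedence:
/// explicit override > sequence number cursor in storage > sequence number
/// derived from the legacy event-ID cursor (+1) > 0.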
async fn get_next_sequence_number<C: crate::sui_client::SuiClientInner>(
    store: &BridgeOrchestratorTables,
    sui_client: &crate::sui_client::SuiClient<C>,
    last_processed_bridge_event_id: Option<EventID>,
    next_sequence_number_override: Option<u64>,
) -> u64 {
    if let Some(next_sequence_number_override) = next_sequence_number_override {
        info!("Overriding next sequence number to {next_sequence_number_override}",);
        return next_sequence_number_override;
    }

    if let Ok(Some(sequence_number)) = store.get_sui_sequence_number_cursor() {
        info!("Using sequence number {sequence_number} from storage",);
        return sequence_number;
    }

    if let Some(event_id) = last_processed_bridge_event_id {
        match sui_client.get_sequence_number_from_event_id(event_id).await {
            Ok(Some(sequence_number)) => {
                let next = sequence_number + 1;
                info!(
                    ?event_id,
                    last_processed_seq = sequence_number,
                    next_seq_to_read = next,
                    "Migrated from legacy event cursor to sequence number cursor"
                );
                return next;
            }
            Ok(None) => {
                info!(
                    ?event_id,
                    "Could not extract sequence number from legacy event cursor, starting from 0"
                );
            }
            Err(e) => {
                info!(
                    ?event_id,
                    ?e,
                    "Failed to get sequence number from legacy event cursor, starting from 0"
                );
            }
        }
    }

    info!("No cursor found for gRPC syncer, starting from sequence number 0");
    0
}

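/// Builds the map of Sui bridge modules to watch and their starting cursors.
/// A config override applies to all modules; otherwise cursors stored in the DB
/// are used, and modules without a stored cursor are queried from the beginning.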
fn get_sui_modules_to_watch(
    store: &std::sync::Arc<BridgeOrchestratorTables>,
    sui_bridge_module_last_processed_event_id_override: Option<EventID>,
) -> HashMap<Identifier, Option<EventID>> {
    let sui_bridge_modules = vec![
        BRIDGE_MODULE_NAME.to_owned(),
        BRIDGE_COMMITTEE_MODULE_NAME.to_owned(),
        BRIDGE_TREASURY_MODULE_NAME.to_owned(),
        BRIDGE_LIMITER_MODULE_NAME.to_owned(),
    ];
    if let Some(cursor) = sui_bridge_module_last_processed_event_id_override {
        info!("Overriding cursor for sui bridge modules to {:?}", cursor);
        return HashMap::from_iter(
            sui_bridge_modules
                .iter()
                .map(|module| (module.clone(), Some(cursor))),
        );
    }

    let sui_bridge_module_stored_cursor = store
        .get_sui_event_cursors(&sui_bridge_modules)
        .expect("Failed to get sui event cursors from storage");
    let mut sui_modules_to_watch = HashMap::new();
    for (module_identifier, cursor) in sui_bridge_modules
        .iter()
        .zip(sui_bridge_module_stored_cursor)
    {
        if cursor.is_none() {
            info!(
                "No cursor found for sui bridge module {} in storage or config override, querying from the beginning.",
                module_identifier
            );
        }
        sui_modules_to_watch.insert(module_identifier.clone(), cursor);
    }
    sui_modules_to_watch
}

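/// Builds the map of Eth bridge contracts to watch and their starting blocks.
/// Start-block precedence: config override > stored cursor + 1 > fallback block.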
fn get_eth_contracts_to_watch(
    store: &std::sync::Arc<BridgeOrchestratorTables>,
    eth_contracts: &[EthAddress],
    eth_contracts_start_block_fallback: u64,
    eth_contracts_start_block_override: Option<u64>,
) -> HashMap<EthAddress, u64> {
    let stored_eth_cursors = store
        .get_eth_event_cursors(eth_contracts)
        .expect("Failed to get eth event cursors from storage");
    let mut eth_contracts_to_watch = HashMap::new();
    for (contract, stored_cursor) in eth_contracts.iter().zip(stored_eth_cursors) {
        // start block precedence:
        // eth_contracts_start_block_override > stored cursor > eth_contracts_start_block_fallback
        match (eth_contracts_start_block_override, stored_cursor) {
            (Some(override_), _) => {
                eth_contracts_to_watch.insert(*contract, override_);
                info!(
                    "Overriding cursor for eth bridge contract {} to {}. Stored cursor: {:?}",
                    contract, override_, stored_cursor
                );
            }
            (None, Some(stored_cursor)) => {
                // +1: The stored value is the last block that was processed, so we start from the next block.
                eth_contracts_to_watch.insert(*contract, stored_cursor + 1);
            }
            (None, None) => {
                // If no cursor is found, start from the fallback block.
                eth_contracts_to_watch.insert(*contract, eth_contracts_start_block_fallback);
            }
        }
    }
    eth_contracts_to_watch
}

#[cfg(test)]
mod tests {
    use alloy::primitives::Address as EthAddress;
    use alloy::primitives::U160;
    use prometheus::Registry;

    use super::*;
    use crate::config::BridgeNodeConfig;
    use crate::config::EthConfig;
    use crate::config::SuiConfig;
    use crate::config::default_ed25519_key_pair;
    use crate::e2e_tests::test_utils::BridgeTestCluster;
    use crate::e2e_tests::test_utils::BridgeTestClusterBuilder;
    use crate::utils::wait_for_server_to_be_up;
    use fastcrypto::secp256k1::Secp256k1KeyPair;
    use sui_config::local_ip_utils::get_available_port;
    use sui_types::base_types::SuiAddress;
    use sui_types::bridge::BridgeChainId;
    use sui_types::crypto::EncodeDecodeBase64;
    use sui_types::crypto::KeypairTraits;
    use sui_types::crypto::SuiKeyPair;
    use sui_types::crypto::get_key_pair;
    use sui_types::digests::TransactionDigest;
    use sui_types::event::EventID;
    use tempfile::tempdir;

    #[tokio::test]
    async fn test_get_eth_contracts_to_watch() {
        telemetry_subscribers::init_for_testing();
        let temp_dir = tempfile::tempdir().unwrap();
        let eth_contracts = vec![
            EthAddress::from(U160::from(1)),
            EthAddress::from(U160::from(2)),
        ];
        let store = BridgeOrchestratorTables::new(temp_dir.path());

        // No override, no watermark found in DB, use fallback
        let contracts = get_eth_contracts_to_watch(&store, &eth_contracts, 10, None);
        assert_eq!(
            contracts,
            vec![(eth_contracts[0], 10), (eth_contracts[1], 10)]
                .into_iter()
                .collect::<HashMap<_, _>>()
        );

        // no watermark found in DB, use override
        let contracts = get_eth_contracts_to_watch(&store, &eth_contracts, 10, Some(420));
        assert_eq!(
            contracts,
            vec![(eth_contracts[0], 420), (eth_contracts[1], 420)]
                .into_iter()
                .collect::<HashMap<_, _>>()
        );

        store
            .update_eth_event_cursor(eth_contracts[0], 100)
            .unwrap();
        store
            .update_eth_event_cursor(eth_contracts[1], 102)
            .unwrap();

        // No override, found watermarks in DB, use +1
        let contracts = get_eth_contracts_to_watch(&store, &eth_contracts, 10, None);
        assert_eq!(
            contracts,
            vec![(eth_contracts[0], 101), (eth_contracts[1], 103)]
                .into_iter()
                .collect::<HashMap<_, _>>()
        );

        // use override
        let contracts = get_eth_contracts_to_watch(&store, &eth_contracts, 10, Some(200));
        assert_eq!(
            contracts,
            vec![(eth_contracts[0], 200), (eth_contracts[1], 200)]
                .into_iter()
                .collect::<HashMap<_, _>>()
        );
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 8)]
    async fn test_starting_bridge_node() {
        telemetry_subscribers::init_for_testing();
        let bridge_test_cluster = setup().await;
        let kp = bridge_test_cluster.bridge_authority_key(0);

        // prepare node config (server only)
        let tmp_dir = tempdir().unwrap().keep();
        let authority_key_path = "test_starting_bridge_node_bridge_authority_key";
        let server_listen_port = get_available_port("127.0.0.1");
        let base64_encoded = kp.encode_base64();
        std::fs::write(tmp_dir.join(authority_key_path), base64_encoded).unwrap();

        let config = BridgeNodeConfig {
            server_listen_port,
            metrics_port: get_available_port("127.0.0.1"),
            bridge_authority_key_path: tmp_dir.join(authority_key_path),
            sui: SuiConfig {
                sui_rpc_url: bridge_test_cluster.sui_rpc_url(),
                sui_bridge_chain_id: BridgeChainId::SuiCustom as u8,
                bridge_client_key_path: None,
                bridge_client_gas_object: None,
                sui_bridge_module_last_processed_event_id_override: None,
                sui_bridge_next_sequence_number_override: None,
            },
            eth: EthConfig {
                eth_rpc_url: bridge_test_cluster.eth_rpc_url(),
                eth_bridge_proxy_address: bridge_test_cluster.sui_bridge_address(),
                eth_bridge_chain_id: BridgeChainId::EthCustom as u8,
                eth_contracts_start_block_fallback: None,
                eth_contracts_start_block_override: None,
            },
            approved_governance_actions: vec![],
            run_client: false,
            db_path: None,
            metrics_key_pair: default_ed25519_key_pair(),
            metrics: None,
            watchdog_config: None,
        };
        // Spawn bridge node in memory
        let _handle = run_bridge_node(
            config,
            BridgeNodePublicMetadata::empty_for_testing(),
            Registry::new(),
        )
        .await
        .unwrap();

        let server_url = format!("http://127.0.0.1:{}", server_listen_port);
        // Now we expect the server to be up and running.
        let res = wait_for_server_to_be_up(server_url, 5).await;
        res.unwrap();
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 8)]
    async fn test_starting_bridge_node_with_client() {
        telemetry_subscribers::init_for_testing();
        let bridge_test_cluster = setup().await;
        let kp = bridge_test_cluster.bridge_authority_key(0);

        // prepare node config (server + client)
        let tmp_dir = tempdir().unwrap().keep();
        let db_path = tmp_dir.join("test_starting_bridge_node_with_client_db");
        let authority_key_path = "test_starting_bridge_node_with_client_bridge_authority_key";
        let server_listen_port = get_available_port("127.0.0.1");

        let base64_encoded = kp.encode_base64();
        std::fs::write(tmp_dir.join(authority_key_path), base64_encoded).unwrap();

        let client_sui_address = SuiAddress::from(kp.public());
        let sender_address = bridge_test_cluster.sui_user_address();
        // send some gas to this address
        bridge_test_cluster
            .test_cluster
            .inner
            .transfer_sui_must_exceed(sender_address, client_sui_address, 1000000000)
            .await;

        let config = BridgeNodeConfig {
            server_listen_port,
            metrics_port: get_available_port("127.0.0.1"),
            bridge_authority_key_path: tmp_dir.join(authority_key_path),
            sui: SuiConfig {
                sui_rpc_url: bridge_test_cluster.sui_rpc_url(),
                sui_bridge_chain_id: BridgeChainId::SuiCustom as u8,
                bridge_client_key_path: None,
                bridge_client_gas_object: None,
                sui_bridge_module_last_processed_event_id_override: Some(EventID {
                    tx_digest: TransactionDigest::random(),
                    event_seq: 0,
                }),
                sui_bridge_next_sequence_number_override: None,
            },
            eth: EthConfig {
                eth_rpc_url: bridge_test_cluster.eth_rpc_url(),
                eth_bridge_proxy_address: bridge_test_cluster.sui_bridge_address(),
                eth_bridge_chain_id: BridgeChainId::EthCustom as u8,
                eth_contracts_start_block_fallback: Some(0),
                eth_contracts_start_block_override: None,
            },
            approved_governance_actions: vec![],
            run_client: true,
            db_path: Some(db_path),
            metrics_key_pair: default_ed25519_key_pair(),
            metrics: None,
            watchdog_config: None,
        };
        // Spawn bridge node in memory
        let _handle = run_bridge_node(
            config,
            BridgeNodePublicMetadata::empty_for_testing(),
            Registry::new(),
        )
        .await
        .unwrap();

        let server_url = format!("http://127.0.0.1:{}", server_listen_port);
        // Now we expect the server to be up and running.
        // Client components are spawned before the server, so once the server is up
        // we know the client components are already running.
        let res = wait_for_server_to_be_up(server_url, 5).await;
        res.unwrap();
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 8)]
    async fn test_starting_bridge_node_with_client_and_separate_client_key() {
        telemetry_subscribers::init_for_testing();
        let bridge_test_cluster = setup().await;
        let kp = bridge_test_cluster.bridge_authority_key(0);

        // prepare node config (server + client)
        let tmp_dir = tempdir().unwrap().keep();
        let db_path =
            tmp_dir.join("test_starting_bridge_node_with_client_and_separate_client_key_db");
        let authority_key_path =
            "test_starting_bridge_node_with_client_and_separate_client_key_bridge_authority_key";
        let server_listen_port = get_available_port("127.0.0.1");

        // prepare bridge authority key
        let base64_encoded = kp.encode_base64();
        std::fs::write(tmp_dir.join(authority_key_path), base64_encoded).unwrap();

        // prepare bridge client key
        let (_, kp): (_, Secp256k1KeyPair) = get_key_pair();
        let kp = SuiKeyPair::from(kp);
        let client_key_path =
            "test_starting_bridge_node_with_client_and_separate_client_key_bridge_client_key";
        std::fs::write(tmp_dir.join(client_key_path), kp.encode_base64()).unwrap();
        let client_sui_address = SuiAddress::from(&kp.public());
        let sender_address = bridge_test_cluster.sui_user_address();
        // send some gas to this address
        let gas_obj = bridge_test_cluster
            .test_cluster
            .inner
            .transfer_sui_must_exceed(sender_address, client_sui_address, 1000000000)
            .await;

        let config = BridgeNodeConfig {
            server_listen_port,
            metrics_port: get_available_port("127.0.0.1"),
            bridge_authority_key_path: tmp_dir.join(authority_key_path),
            sui: SuiConfig {
                sui_rpc_url: bridge_test_cluster.sui_rpc_url(),
                sui_bridge_chain_id: BridgeChainId::SuiCustom as u8,
                bridge_client_key_path: Some(tmp_dir.join(client_key_path)),
                bridge_client_gas_object: Some(gas_obj),
                sui_bridge_module_last_processed_event_id_override: Some(EventID {
                    tx_digest: TransactionDigest::random(),
                    event_seq: 0,
                }),
                sui_bridge_next_sequence_number_override: None,
            },
            eth: EthConfig {
                eth_rpc_url: bridge_test_cluster.eth_rpc_url(),
                eth_bridge_proxy_address: bridge_test_cluster.sui_bridge_address(),
                eth_bridge_chain_id: BridgeChainId::EthCustom as u8,
                eth_contracts_start_block_fallback: Some(0),
                eth_contracts_start_block_override: Some(0),
            },
            approved_governance_actions: vec![],
            run_client: true,
            db_path: Some(db_path),
            metrics_key_pair: default_ed25519_key_pair(),
            metrics: None,
            watchdog_config: None,
        };
        // Spawn bridge node in memory
        let _handle = run_bridge_node(
            config,
            BridgeNodePublicMetadata::empty_for_testing(),
            Registry::new(),
        )
        .await
        .unwrap();

        let server_url = format!("http://127.0.0.1:{}", server_listen_port);
        // Now we expect the server to be up and running.
        // Client components are spawned before the server, so once the server is up
        // we know the client components are already running.
        let res = wait_for_server_to_be_up(server_url, 5).await;
        res.unwrap();
    }

    async fn setup() -> BridgeTestCluster {
        BridgeTestClusterBuilder::new()
            .with_eth_env(true)
            .with_bridge_cluster(false)
            .with_num_validators(2)
            .build()
            .await
    }
}