#![doc = include_str!("../README.md")]
pub mod error;
pub mod faucet;
pub mod query_types;
pub mod streams;
use error::Error;
use query_types::ActiveValidatorsArgs;
use query_types::ActiveValidatorsQuery;
use query_types::BalanceArgs;
use query_types::BalanceQuery;
use query_types::ChainIdentifierQuery;
use query_types::CheckpointArgs;
use query_types::CheckpointId;
use query_types::CheckpointQuery;
use query_types::CheckpointsArgs;
use query_types::CheckpointsQuery;
use query_types::CoinMetadata;
use query_types::CoinMetadataArgs;
use query_types::CoinMetadataQuery;
use query_types::DefaultSuinsNameQuery;
use query_types::DefaultSuinsNameQueryArgs;
use query_types::DryRunArgs;
use query_types::DryRunQuery;
use query_types::DynamicFieldArgs;
use query_types::DynamicFieldConnectionArgs;
use query_types::DynamicFieldQuery;
use query_types::DynamicFieldsOwnerQuery;
use query_types::DynamicObjectFieldQuery;
use query_types::Epoch;
use query_types::EpochArgs;
use query_types::EpochQuery;
use query_types::EpochSummaryQuery;
use query_types::EventFilter;
use query_types::EventsQuery;
use query_types::EventsQueryArgs;
use query_types::ExecuteTransactionArgs;
use query_types::ExecuteTransactionQuery;
use query_types::LatestPackageQuery;
use query_types::MoveFunction;
use query_types::MoveModule;
use query_types::MovePackageVersionFilter;
use query_types::NormalizedMoveFunctionQuery;
use query_types::NormalizedMoveFunctionQueryArgs;
use query_types::NormalizedMoveModuleQuery;
use query_types::NormalizedMoveModuleQueryArgs;
use query_types::ObjectFilter;
use query_types::ObjectQuery;
use query_types::ObjectQueryArgs;
use query_types::ObjectsQuery;
use query_types::ObjectsQueryArgs;
use query_types::PackageArgs;
use query_types::PackageByNameArgs;
use query_types::PackageByNameQuery;
use query_types::PackageCheckpointFilter;
use query_types::PackageQuery;
use query_types::PackageVersionsArgs;
use query_types::PackageVersionsQuery;
use query_types::PackagesQuery;
use query_types::PackagesQueryArgs;
use query_types::PageInfo;
use query_types::ProtocolConfigQuery;
use query_types::ProtocolConfigs;
use query_types::ProtocolVersionArgs;
use query_types::ResolveSuinsQuery;
use query_types::ResolveSuinsQueryArgs;
use query_types::ServiceConfig;
use query_types::ServiceConfigQuery;
use query_types::TransactionBlockArgs;
use query_types::TransactionBlockEffectsQuery;
use query_types::TransactionBlockQuery;
use query_types::TransactionBlocksEffectsQuery;
use query_types::TransactionBlocksQuery;
use query_types::TransactionBlocksQueryArgs;
use query_types::TransactionMetadata;
use query_types::TransactionsFilter;
use query_types::Validator;
use streams::stream_paginated_query;
use sui_types::framework::Coin;
use sui_types::Address;
use sui_types::CheckpointDigest;
use sui_types::CheckpointSequenceNumber;
use sui_types::CheckpointSummary;
use sui_types::Event;
use sui_types::MovePackage;
use sui_types::Object;
use sui_types::SignedTransaction;
use sui_types::Transaction;
use sui_types::TransactionDigest;
use sui_types::TransactionEffects;
use sui_types::TransactionKind;
use sui_types::TypeTag;
use sui_types::UserSignature;
use base64ct::Encoding;
use cynic::serde;
use cynic::GraphQlResponse;
use cynic::MutationBuilder;
use cynic::Operation;
use cynic::QueryBuilder;
use futures::Stream;
use reqwest::Url;
use serde::de::DeserializeOwned;
use serde::Serialize;
use std::str::FromStr;
use crate::error::Kind;
use crate::error::Result;
use crate::query_types::CheckpointTotalTxQuery;
use query_types::EpochsArgs;
use query_types::EpochsQuery;
use query_types::TransactionBlockWithEffectsQuery;
use query_types::TransactionBlocksWithEffectsQuery;
/// Fallback page size used when the caller gives no limit and the service's
/// configured max page size cannot be fetched.
const DEFAULT_ITEMS_PER_PAGE: i32 = 10;
/// GraphQL endpoint for Sui mainnet.
const MAINNET_HOST: &str = "https://sui-mainnet.mystenlabs.com/graphql";
/// GraphQL endpoint for Sui testnet.
const TESTNET_HOST: &str = "https://sui-testnet.mystenlabs.com/graphql";
/// GraphQL endpoint for Sui devnet.
const DEVNET_HOST: &str = "https://sui-devnet.mystenlabs.com/graphql";
/// GraphQL endpoint for a locally running service.
const LOCAL_HOST: &str = "http://localhost:9125/graphql";
/// User-agent header sent with every request: "<crate-name>/<crate-version>".
static USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"),);
/// Outcome of a dry-run execution request.
#[derive(Debug)]
pub struct DryRunResult {
/// Transaction effects, when the dry run produced them.
pub effects: Option<TransactionEffects>,
/// Error message reported by the dry run, if any.
pub error: Option<String>,
}
/// A signed transaction paired with its execution effects.
pub struct TransactionDataEffects {
/// The signed transaction data.
pub tx: SignedTransaction,
/// The effects produced by executing the transaction.
pub effects: TransactionEffects,
}
/// The name of a dynamic field: its Move type plus its BCS-encoded value.
#[derive(Clone, Debug)]
pub struct DynamicFieldName {
/// Move type tag of the name.
pub type_: TypeTag,
/// BCS-serialized bytes of the name value.
pub bcs: Vec<u8>,
/// JSON rendering of the name, when the service provides one.
pub json: Option<serde_json::Value>,
}
/// A dynamic field as returned by the service: the name plus (optionally) the value
/// in both BCS and JSON form.
#[derive(Clone, Debug)]
pub struct DynamicFieldOutput {
/// The dynamic field's name.
pub name: DynamicFieldName,
/// The value's Move type and BCS bytes, when present.
pub value: Option<(TypeTag, Vec<u8>)>,
/// JSON rendering of the value, when present.
pub value_as_json: Option<serde_json::Value>,
}
/// BCS bytes used as a dynamic field name; built via the `From` impls below.
pub struct NameValue(Vec<u8>);
/// Wrapper marking bytes that are already BCS-serialized, so they are used as-is.
pub struct BcsName(pub Vec<u8>);
#[derive(Clone, Debug)]
pub struct Page<T> {
page_info: PageInfo,
data: Vec<T>,
}
impl<T> Page<T> {
pub fn page_info(&self) -> &PageInfo {
&self.page_info
}
pub fn data(&self) -> &[T] {
&self.data
}
pub fn new(page_info: PageInfo, data: Vec<T>) -> Self {
Self { page_info, data }
}
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
pub fn new_empty() -> Self {
Self::new(PageInfo::default(), vec![])
}
pub fn into_parts(self) -> (PageInfo, Vec<T>) {
(self.page_info, self.data)
}
}
/// Pagination direction: forward uses `after`/`first`, backward uses `before`/`last`.
#[derive(Clone, Debug, Default)]
pub enum Direction {
#[default]
Forward,
Backward,
}
/// Caller-supplied pagination options for paginated queries.
#[derive(Clone, Debug, Default)]
pub struct PaginationFilter {
/// Direction to paginate in (forward by default).
pub direction: Direction,
/// Cursor to start from, if any.
pub cursor: Option<String>,
/// Max items per page; falls back to the service's max page size, then
/// `DEFAULT_ITEMS_PER_PAGE`.
pub limit: Option<i32>,
}
impl<T: Serialize> From<T> for NameValue {
    /// BCS-serialize any serializable value for use as a dynamic field name.
    fn from(value: T) -> Self {
        // `expect` instead of a bare `unwrap` so a serialization failure (a bug in the
        // caller's type, not a runtime condition) panics with a meaningful message.
        NameValue(bcs::to_bytes(&value).expect("BCS serialization of dynamic field name failed"))
    }
}
impl From<BcsName> for NameValue {
fn from(value: BcsName) -> Self {
NameValue(value.0)
}
}
impl DynamicFieldOutput {
pub fn deserialize_name<T: DeserializeOwned>(&self, expected_type: &TypeTag) -> Result<T> {
assert_eq!(
expected_type, &self.name.type_,
"Expected type {}, but got {}",
expected_type, &self.name.type_
);
let bcs = &self.name.bcs;
bcs::from_bytes::<T>(bcs).map_err(Into::into)
}
pub fn deserialize_value<T: DeserializeOwned>(&self, expected_type: &TypeTag) -> Result<T> {
let typetag = self.value.as_ref().map(|(typename, _)| typename);
assert_eq!(
Some(&expected_type),
typetag.as_ref(),
"Expected type {}, but got {:?}",
expected_type,
typetag
);
if let Some((_, bcs)) = &self.value {
bcs::from_bytes::<T>(bcs).map_err(Into::into)
} else {
Err(Error::from_error(Kind::Deserialization, "Value is missing"))
}
}
}
/// A GraphQL client over HTTP.
pub struct Client {
/// The GraphQL endpoint this client talks to.
rpc: Url,
/// The underlying HTTP client.
inner: reqwest::Client,
/// Lazily fetched and cached service configuration (see `service_config`).
service_config: std::sync::OnceLock<ServiceConfig>,
}
impl Client {
/// Build a client for the given GraphQL endpoint URL.
///
/// Fails when the URL does not parse or the HTTP client cannot be constructed.
pub fn new(server: &str) -> Result<Self> {
    Ok(Client {
        rpc: reqwest::Url::parse(server)?,
        inner: reqwest::Client::builder().user_agent(USER_AGENT).build()?,
        service_config: Default::default(),
    })
}
/// Client preconfigured for the Sui mainnet GraphQL endpoint.
pub fn new_mainnet() -> Self {
Self::new(MAINNET_HOST).expect("Invalid mainnet URL")
}
/// Client preconfigured for the Sui testnet GraphQL endpoint.
pub fn new_testnet() -> Self {
Self::new(TESTNET_HOST).expect("Invalid testnet URL")
}
/// Client preconfigured for the Sui devnet GraphQL endpoint.
pub fn new_devnet() -> Self {
Self::new(DEVNET_HOST).expect("Invalid devnet URL")
}
/// Client pointed at a locally running GraphQL service.
pub fn new_localhost() -> Self {
Self::new(LOCAL_HOST).expect("Invalid localhost URL")
}
/// Point this client at a different GraphQL endpoint; fails if the URL is invalid.
pub fn set_rpc_server(&mut self, server: &str) -> Result<()> {
    self.rpc = reqwest::Url::parse(server)?;
    Ok(())
}
/// The URL of the GraphQL endpoint this client is configured with.
fn rpc_server(&self) -> &str {
self.rpc.as_str()
}
/// Translate a `PaginationFilter` into the `(after, before, first, last)` arguments
/// GraphQL connections expect.
///
/// When the caller supplied no limit, the service's max page size is used, falling
/// back to `DEFAULT_ITEMS_PER_PAGE` if it cannot be fetched.
pub async fn pagination_filter(
    &self,
    pagination_filter: PaginationFilter,
) -> (Option<String>, Option<String>, Option<i32>, Option<i32>) {
    // Only fetch the service config when the caller gave no limit: the previous
    // `unwrap_or(self.max_page_size().await...)` evaluated its argument eagerly,
    // performing a (possibly network-backed) lookup even when the result was unused.
    let limit = match pagination_filter.limit {
        Some(limit) => limit,
        None => self.max_page_size().await.unwrap_or(DEFAULT_ITEMS_PER_PAGE),
    };
    match pagination_filter.direction {
        Direction::Forward => (pagination_filter.cursor, None, Some(limit), None),
        Direction::Backward => (None, pagination_filter.cursor, None, Some(limit)),
    }
}
/// The service's maximum page size, from the (cached) service configuration.
pub async fn max_page_size(&self) -> Result<i32> {
self.service_config().await.map(|cfg| cfg.max_page_size)
}
/// POST a GraphQL operation to the configured endpoint and deserialize the response.
///
/// Transport and JSON-decoding failures are returned as errors; GraphQL-level errors
/// are left inside the `GraphQlResponse` for the caller to inspect.
pub async fn run_query<T, V>(&self, operation: &Operation<T, V>) -> Result<GraphQlResponse<T>>
where
T: serde::de::DeserializeOwned,
V: serde::Serialize,
{
let res = self
.inner
.post(self.rpc_server())
.json(&operation)
.send()
.await?
.json::<GraphQlResponse<T>>()
.await?;
Ok(res)
}
pub async fn chain_id(&self) -> Result<String> {
let operation = ChainIdentifierQuery::build(());
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
response
.data
.map(|e| e.chain_identifier)
.ok_or_else(Error::empty_response_error)
}
/// Get the reference gas price for the given epoch, or for the latest epoch when
/// `epoch` is `None`. Returns `None` when the epoch or its gas price is unknown.
pub async fn reference_gas_price(&self, epoch: Option<u64>) -> Result<Option<u64>> {
let operation = EpochSummaryQuery::build(EpochArgs { id: epoch });
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
response
.data
.and_then(|e| e.epoch)
.and_then(|e| e.reference_gas_price)
// Convert the service's numeric representation to u64, propagating overflow errors.
.map(|x| x.try_into())
.transpose()
}
pub async fn protocol_config(&self, version: Option<u64>) -> Result<Option<ProtocolConfigs>> {
let operation = ProtocolConfigQuery::build(ProtocolVersionArgs { id: version });
let response = self.run_query(&operation).await?;
Ok(response.data.map(|p| p.protocol_config))
}
/// Get the service configuration, fetching it once and caching it in a `OnceLock`.
///
/// Concurrent first calls may each issue the query; `get_or_init` ensures only one
/// result is stored and all callers observe the same cached value.
pub async fn service_config(&self) -> Result<&ServiceConfig> {
// Fast path: already fetched and cached.
if let Some(service_config) = self.service_config.get() {
return Ok(service_config);
}
let service_config = {
let operation = ServiceConfigQuery::build(());
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
response
.data
.map(|s| s.service_config)
.ok_or_else(Error::empty_response_error)?
};
let service_config = self.service_config.get_or_init(move || service_config);
Ok(service_config)
}
/// Get a page of the active validator set for the given epoch, or for the latest
/// epoch when `epoch` is `None`. Returns an empty page when the epoch or its
/// validator set is unknown.
pub async fn active_validators(
    &self,
    epoch: Option<u64>,
    pagination_filter: PaginationFilter,
) -> Result<Page<Validator>> {
    let (after, before, first, last) = self.pagination_filter(pagination_filter).await;
    let operation = ActiveValidatorsQuery::build(ActiveValidatorsArgs {
        id: epoch,
        after: after.as_deref(),
        before: before.as_deref(),
        first,
        last,
    });
    let response = self.run_query(&operation).await?;
    if let Some(errors) = response.errors {
        return Err(Error::graphql_error(errors));
    }
    if let Some(validators) = response
        .data
        .and_then(|d| d.epoch)
        .and_then(|v| v.validator_set)
    {
        let page_info = validators.active_validators.page_info;
        // `nodes` is already a Vec (siblings such as `epochs` pass it to `Page::new`
        // directly), so the former `into_iter().collect::<Vec<_>>()` round-trip is dropped.
        let nodes = validators.active_validators.nodes;
        Ok(Page::new(page_info, nodes))
    } else {
        Ok(Page::new_empty())
    }
}
/// Total network transactions as of the checkpoint with the given digest.
pub async fn total_transaction_blocks_by_digest(
&self,
digest: CheckpointDigest,
) -> Result<Option<u64>> {
self.internal_total_transaction_blocks(Some(digest.to_string()), None)
.await
}
/// Total network transactions as of the checkpoint with the given sequence number.
pub async fn total_transaction_blocks_by_seq_num(&self, seq_num: u64) -> Result<Option<u64>> {
self.internal_total_transaction_blocks(None, Some(seq_num))
.await
}
/// Total network transactions as of the latest checkpoint.
pub async fn total_transaction_blocks(&self) -> Result<Option<u64>> {
self.internal_total_transaction_blocks(None, None).await
}
/// Shared implementation for the `total_transaction_blocks*` methods.
///
/// At most one of `digest` / `seq_num` may be set; with neither, the latest
/// checkpoint is queried.
async fn internal_total_transaction_blocks(
&self,
digest: Option<String>,
seq_num: Option<u64>,
) -> Result<Option<u64>> {
if digest.is_some() && seq_num.is_some() {
return Err(Error::from_error(
Kind::Other,
"Conflicting arguments: either digest or seq_num can be provided, but not both.",
));
}
let operation = CheckpointTotalTxQuery::build(CheckpointArgs {
id: CheckpointId {
digest,
sequence_number: seq_num,
},
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
Ok(response
.data
.and_then(|x| x.checkpoint)
.and_then(|c| c.network_total_transactions))
}
/// Get the total balance of `address` for `coin_type`. When `coin_type` is `None`
/// the service's default coin type applies (presumably SUI — confirm against the
/// GraphQL schema). Returns `None` when the owner or balance is unknown.
pub async fn balance(&self, address: Address, coin_type: Option<&str>) -> Result<Option<u128>> {
let operation = BalanceQuery::build(BalanceArgs {
address,
coin_type: coin_type.map(|x| x.to_string()),
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
// The service returns the balance as a decimal string; parse it into u128.
let total_balance = response
.data
.map(|b| b.owner.and_then(|o| o.balance.map(|b| b.total_balance)))
.ok_or_else(Error::empty_response_error)?
.flatten()
.map(|x| x.0.parse::<u128>())
.transpose()?;
Ok(total_balance)
}
/// Get a page of coins owned by `owner`, optionally restricted to `coin_type`
/// (defaults to the generic `0x2::coin::Coin` type filter).
pub async fn coins(
&self,
owner: Address,
coin_type: Option<&str>,
pagination_filter: PaginationFilter,
) -> Result<Page<Coin>> {
let response = self
.objects(
Some(ObjectFilter {
type_: Some(coin_type.unwrap_or("0x2::coin::Coin")),
owner: Some(owner),
object_ids: None,
}),
pagination_filter,
)
.await?;
Ok(Page::new(
response.page_info,
response
.data
.iter()
// Objects that do not parse as coins are skipped silently here.
.flat_map(Coin::try_from_object)
.map(|c| c.into_owned())
.collect::<Vec<_>>(),
))
}
/// Stream all coins owned by `address`, following pagination automatically in the
/// given direction.
pub async fn coins_stream(
&self,
address: Address,
coin_type: Option<&'static str>,
streaming_direction: Direction,
) -> impl Stream<Item = Result<Coin>> {
stream_paginated_query(
move |filter| self.coins(address, coin_type, filter),
streaming_direction,
)
}
pub async fn coin_metadata(&self, coin_type: &str) -> Result<Option<CoinMetadata>> {
let operation = CoinMetadataQuery::build(CoinMetadataArgs { coin_type });
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
Ok(response.data.and_then(|x| x.coin_metadata))
}
/// Get the total supply for the given coin type, when the coin's metadata and
/// supply are available.
pub async fn total_supply(&self, coin_type: &str) -> Result<Option<u64>> {
let coin_metadata = self.coin_metadata(coin_type).await?;
coin_metadata
.and_then(|c| c.supply)
.map(|c| c.try_into())
.transpose()
}
pub async fn checkpoint(
&self,
digest: Option<CheckpointDigest>,
seq_num: Option<u64>,
) -> Result<Option<CheckpointSummary>> {
if digest.is_some() && seq_num.is_some() {
return Err(Error::from_error(
Kind::Other,
"either digest or seq_num must be provided",
));
}
let operation = CheckpointQuery::build(CheckpointArgs {
id: CheckpointId {
digest: digest.map(|d| d.to_string()),
sequence_number: seq_num,
},
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
response
.data
.map(|c| c.checkpoint.map(|c| c.try_into()).transpose())
.ok_or(Error::empty_response_error())?
}
/// Get a page of checkpoint summaries. Returns an empty page when the response
/// carries no data.
pub async fn checkpoints(
&self,
pagination_filter: PaginationFilter,
) -> Result<Page<CheckpointSummary>> {
let (after, before, first, last) = self.pagination_filter(pagination_filter).await;
let operation = CheckpointsQuery::build(CheckpointsArgs {
after: after.as_deref(),
before: before.as_deref(),
first,
last,
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
if let Some(checkpoints) = response.data {
let cc = checkpoints.checkpoints;
let page_info = cc.page_info;
// Fail the whole page if any node cannot be converted into a CheckpointSummary.
let nodes = cc
.nodes
.into_iter()
.map(|c| c.try_into())
.collect::<Result<Vec<CheckpointSummary>, _>>()?;
Ok(Page::new(page_info, nodes))
} else {
Ok(Page::new_empty())
}
}
/// Stream all checkpoint summaries, following pagination automatically.
pub async fn checkpoints_stream(
&self,
streaming_direction: Direction,
) -> impl Stream<Item = Result<CheckpointSummary>> + '_ {
stream_paginated_query(move |filter| self.checkpoints(filter), streaming_direction)
}
/// Sequence number of the latest checkpoint (queried with no digest/seq filter).
pub async fn latest_checkpoint_sequence_number(
&self,
) -> Result<Option<CheckpointSequenceNumber>> {
Ok(self
.checkpoint(None, None)
.await?
.map(|c| c.sequence_number))
}
/// Get the dynamic field on object `address` with the given name type and value.
///
/// `name` may be any serializable value (BCS-encoded via `From<T> for NameValue`)
/// or pre-encoded bytes wrapped in `BcsName`.
pub async fn dynamic_field(
&self,
address: Address,
type_: TypeTag,
name: impl Into<NameValue>,
) -> Result<Option<DynamicFieldOutput>> {
let bcs = name.into().0;
let operation = DynamicFieldQuery::build(DynamicFieldArgs {
address,
name: crate::query_types::DynamicFieldName {
type_: type_.to_string(),
// The service expects the BCS name bytes base64-encoded.
bcs: crate::query_types::Base64(base64ct::Base64::encode_string(&bcs)),
},
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
let result = response
.data
.and_then(|d| d.owner)
.and_then(|o| o.dynamic_field)
.map(|df| df.try_into())
.transpose()?;
Ok(result)
}
/// Get the dynamic *object* field on object `address` with the given name type and
/// value. Mirrors `dynamic_field` but queries the object-field variant.
pub async fn dynamic_object_field(
&self,
address: Address,
type_: TypeTag,
name: impl Into<NameValue>,
) -> Result<Option<DynamicFieldOutput>> {
let bcs = name.into().0;
let operation = DynamicObjectFieldQuery::build(DynamicFieldArgs {
address,
name: crate::query_types::DynamicFieldName {
type_: type_.to_string(),
// The service expects the BCS name bytes base64-encoded.
bcs: crate::query_types::Base64(base64ct::Base64::encode_string(&bcs)),
},
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
let result: Option<DynamicFieldOutput> = response
.data
.and_then(|d| d.owner)
.and_then(|o| o.dynamic_object_field)
.map(|df| df.try_into())
.transpose()?;
Ok(result)
}
/// Get a page of the dynamic fields owned by object `address`. Returns an empty
/// page when the owner is unknown.
pub async fn dynamic_fields(
&self,
address: Address,
pagination_filter: PaginationFilter,
) -> Result<Page<DynamicFieldOutput>> {
let (after, before, first, last) = self.pagination_filter(pagination_filter).await;
let operation = DynamicFieldsOwnerQuery::build(DynamicFieldConnectionArgs {
address,
after: after.as_deref(),
before: before.as_deref(),
first,
last,
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
let Some(DynamicFieldsOwnerQuery { owner: Some(dfs) }) = response.data else {
return Ok(Page::new_empty());
};
Ok(Page::new(
dfs.dynamic_fields.page_info,
dfs.dynamic_fields
.nodes
.into_iter()
.map(TryInto::try_into)
.collect::<Result<Vec<_>>>()?,
))
}
/// Stream all dynamic fields owned by object `address`, following pagination
/// automatically.
pub async fn dynamic_fields_stream(
&self,
address: Address,
streaming_direction: Direction,
) -> impl Stream<Item = Result<DynamicFieldOutput>> + '_ {
stream_paginated_query(
move |filter| self.dynamic_fields(address, filter),
streaming_direction,
)
}
pub async fn epoch(&self, epoch: Option<u64>) -> Result<Option<Epoch>> {
let operation = EpochQuery::build(EpochArgs { id: epoch });
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
Ok(response.data.and_then(|d| d.epoch))
}
/// Get a page of epochs. Returns an empty page when the response carries no data.
pub async fn epochs(&self, pagination_filter: PaginationFilter) -> Result<Page<Epoch>> {
let (after, before, first, last) = self.pagination_filter(pagination_filter).await;
let operation = EpochsQuery::build(EpochsArgs {
after: after.as_deref(),
before: before.as_deref(),
first,
last,
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
if let Some(epochs) = response.data {
Ok(Page::new(epochs.epochs.page_info, epochs.epochs.nodes))
} else {
Ok(Page::new_empty())
}
}
/// Stream all epochs, following pagination automatically.
pub async fn epochs_stream(
&self,
streaming_direction: Direction,
) -> impl Stream<Item = Result<Epoch>> + '_ {
stream_paginated_query(
move |pag_filter| self.epochs(pag_filter),
streaming_direction,
)
}
/// Number of checkpoints in the given epoch (latest epoch when `None`).
pub async fn epoch_total_checkpoints(&self, epoch: Option<u64>) -> Result<Option<u64>> {
let response = self.epoch_summary(epoch).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
Ok(response
.data
.and_then(|d| d.epoch)
.and_then(|e| e.total_checkpoints))
}
/// Number of transaction blocks in the given epoch (latest epoch when `None`).
pub async fn epoch_total_transaction_blocks(&self, epoch: Option<u64>) -> Result<Option<u64>> {
let response = self.epoch_summary(epoch).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
Ok(response
.data
.and_then(|d| d.epoch)
.and_then(|e| e.total_transactions))
}
/// Run the epoch summary query shared by the `epoch_total_*` helpers, returning the
/// raw GraphQL response for the callers to unpack.
async fn epoch_summary(
&self,
epoch: Option<u64>,
) -> Result<GraphQlResponse<EpochSummaryQuery>> {
let operation = EpochSummaryQuery::build(EpochArgs { id: epoch });
self.run_query(&operation).await
}
/// Get a page of events matching `filter`, each paired with the digest of the
/// transaction that emitted it.
pub async fn events(
&self,
filter: Option<EventFilter>,
pagination_filter: PaginationFilter,
) -> Result<Page<(Event, TransactionDigest)>> {
let (after, before, first, last) = self.pagination_filter(pagination_filter).await;
let operation = EventsQuery::build(EventsQueryArgs {
filter,
after: after.as_deref(),
before: before.as_deref(),
first,
last,
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
if let Some(events) = response.data {
let ec = events.events;
let page_info = ec.page_info;
let events_with_digests = ec
.nodes
.into_iter()
.map(|node| -> Result<(Event, TransactionDigest)> {
// Events arrive as base64-wrapped BCS; decode then deserialize.
let event =
bcs::from_bytes::<Event>(&base64ct::Base64::decode_vec(&node.bcs.0)?)?;
// Every event must carry the digest of its emitting transaction.
let tx_digest = node
.transaction_block
.ok_or_else(Error::empty_response_error)?
.digest
.ok_or_else(|| {
Error::from_error(
Kind::Deserialization,
"Expected a transaction digest for this event, but it is missing.",
)
})?;
let tx_digest = TransactionDigest::from_base58(&tx_digest)?;
Ok((event, tx_digest))
})
.collect::<Result<Vec<_>>>()?;
Ok(Page::new(page_info, events_with_digests))
} else {
Ok(Page::new_empty())
}
}
/// Stream all events matching `filter`, following pagination automatically.
pub async fn events_stream(
&self,
filter: Option<EventFilter>,
streaming_direction: Direction,
) -> impl Stream<Item = Result<(Event, TransactionDigest)>> + '_ {
stream_paginated_query(
move |pag_filter| self.events(filter.clone(), pag_filter),
streaming_direction,
)
}
/// Get the object at `address`, optionally at a specific `version` (latest when
/// `None`). Decoded from the base64-wrapped BCS the service returns.
pub async fn object(&self, address: Address, version: Option<u64>) -> Result<Option<Object>> {
let operation = ObjectQuery::build(ObjectQueryArgs { address, version });
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
if let Some(object) = response.data {
let obj = object.object;
let bcs = obj
.and_then(|o| o.bcs)
.map(|bcs| base64ct::Base64::decode_vec(bcs.0.as_str()))
.transpose()?;
let object = bcs
.map(|b| bcs::from_bytes::<sui_types::Object>(&b))
.transpose()?;
Ok(object)
} else {
Ok(None)
}
}
/// Get a page of objects matching `filter`. Nodes without BCS payloads are skipped;
/// any decode failure fails the whole page.
pub async fn objects(
&self,
filter: Option<ObjectFilter<'_>>,
pagination_filter: PaginationFilter,
) -> Result<Page<Object>> {
let (after, before, first, last) = self.pagination_filter(pagination_filter).await;
let operation = ObjectsQuery::build(ObjectsQueryArgs {
after: after.as_deref(),
before: before.as_deref(),
filter,
first,
last,
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
if let Some(objects) = response.data {
let oc = objects.objects;
let page_info = oc.page_info;
// First pass: base64-decode every node that has a BCS payload.
let bcs = oc
.nodes
.iter()
.map(|o| &o.bcs)
.filter_map(|b64| {
b64.as_ref()
.map(|b| base64ct::Base64::decode_vec(b.0.as_str()))
})
.collect::<Result<Vec<_>, base64ct::Error>>()?;
// Second pass: BCS-deserialize the decoded bytes into objects.
let objects = bcs
.iter()
.map(|b| bcs::from_bytes::<sui_types::Object>(b))
.collect::<Result<Vec<_>, bcs::Error>>()?;
Ok(Page::new(page_info, objects))
} else {
Ok(Page::new_empty())
}
}
/// Stream all objects matching `filter`, following pagination automatically.
pub async fn objects_stream<'a>(
&'a self,
filter: Option<ObjectFilter<'a>>,
streaming_direction: Direction,
) -> impl Stream<Item = Result<Object>> + 'a {
stream_paginated_query(
move |pag_filter| self.objects(filter.clone(), pag_filter),
streaming_direction,
)
}
/// Get the raw BCS bytes of the object with the given id, at its latest version.
pub async fn object_bcs(&self, object_id: Address) -> Result<Option<Vec<u8>>> {
    let operation = ObjectQuery::build(ObjectQueryArgs {
        address: object_id,
        version: None,
    });
    // Propagate transport errors with `?` instead of the previous `.unwrap()`,
    // which aborted the caller on any network failure.
    let response = self.run_query(&operation).await?;
    if let Some(errors) = response.errors {
        return Err(Error::graphql_error(errors));
    }
    if let Some(object) = response.data.map(|d| d.object) {
        Ok(object
            .and_then(|o| o.bcs)
            .map(|bcs| base64ct::Base64::decode_vec(bcs.0.as_str()))
            .transpose()?)
    } else {
        Ok(None)
    }
}
/// Get the JSON rendering of a Move object's contents, at an optional `version`
/// (latest when `None`). Returns `None` for non-Move objects or missing contents.
pub async fn move_object_contents(
&self,
address: Address,
version: Option<u64>,
) -> Result<Option<serde_json::Value>> {
let operation = ObjectQuery::build(ObjectQueryArgs { address, version });
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
if let Some(object) = response.data {
Ok(object
.object
.and_then(|o| o.as_move_object)
.and_then(|o| o.contents)
.and_then(|mv| mv.json))
} else {
Ok(None)
}
}
/// Get the BCS bytes of a Move object's contents, at an optional `version`
/// (latest when `None`). Returns `None` for non-Move objects or missing contents.
pub async fn move_object_contents_bcs(
&self,
address: Address,
version: Option<u64>,
) -> Result<Option<Vec<u8>>> {
let operation = ObjectQuery::build(ObjectQueryArgs { address, version });
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
if let Some(object) = response.data {
Ok(object
.object
.and_then(|o| o.as_move_object)
.and_then(|o| o.contents)
.map(|bcs| base64ct::Base64::decode_vec(bcs.bcs.0.as_str()))
.transpose()?)
} else {
Ok(None)
}
}
/// Get the `MovePackage` at `address`, optionally at a specific `version`.
pub async fn package(
&self,
address: Address,
version: Option<u64>,
) -> Result<Option<MovePackage>> {
let operation = PackageQuery::build(PackageArgs { address, version });
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
// base64-decode then BCS-deserialize the package payload, propagating failures.
Ok(response
.data
.and_then(|x| x.package)
.and_then(|x| x.package_bcs)
.map(|bcs| base64ct::Base64::decode_vec(bcs.0.as_str()))
.transpose()?
.map(|bcs| bcs::from_bytes::<MovePackage>(&bcs))
.transpose()?)
}
/// Get a page of all versions of the package at `address`, optionally bounded to
/// the (`after_version`, `before_version`) range.
pub async fn package_versions(
&self,
address: Address,
pagination_filter: PaginationFilter,
after_version: Option<u64>,
before_version: Option<u64>,
) -> Result<Page<MovePackage>> {
let (after, before, first, last) = self.pagination_filter(pagination_filter).await;
let operation = PackageVersionsQuery::build(PackageVersionsArgs {
address,
after: after.as_deref(),
before: before.as_deref(),
first,
last,
filter: Some(MovePackageVersionFilter {
after_version,
before_version,
}),
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
if let Some(packages) = response.data {
let pc = packages.package_versions;
let page_info = pc.page_info;
// Nodes without BCS payloads are skipped; decode failures fail the page.
let bcs = pc
.nodes
.iter()
.map(|p| &p.package_bcs)
.filter_map(|b64| {
b64.as_ref()
.map(|b| base64ct::Base64::decode_vec(b.0.as_str()))
})
.collect::<Result<Vec<_>, base64ct::Error>>()?;
let packages = bcs
.iter()
.map(|b| bcs::from_bytes::<MovePackage>(b))
.collect::<Result<Vec<_>, bcs::Error>>()?;
Ok(Page::new(page_info, packages))
} else {
Ok(Page::new_empty())
}
}
/// Get the latest version of the package at `address`.
pub async fn package_latest(&self, address: Address) -> Result<Option<MovePackage>> {
let operation = LatestPackageQuery::build(PackageArgs {
address,
version: None,
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
// base64-decode then BCS-deserialize the package payload, propagating failures.
let pkg = response
.data
.and_then(|x| x.latest_package)
.and_then(|x| x.package_bcs)
.map(|bcs| base64ct::Base64::decode_vec(bcs.0.as_str()))
.transpose()?
.map(|bcs| bcs::from_bytes::<MovePackage>(&bcs))
.transpose()?;
Ok(pkg)
}
/// Resolve a package by its registered name.
pub async fn package_by_name(&self, name: &str) -> Result<Option<MovePackage>> {
    let operation = PackageByNameQuery::build(PackageByNameArgs { name });
    let response = self.run_query(&operation).await?;
    if let Some(errors) = response.errors {
        return Err(Error::graphql_error(errors));
    }
    // Propagate decode/deserialize failures like `package` and `package_latest` do.
    // The previous `.ok()` chain silently turned a corrupt payload into `None`.
    Ok(response
        .data
        .and_then(|x| x.package_by_name)
        .and_then(|x| x.package_bcs)
        .map(|bcs| base64ct::Base64::decode_vec(bcs.0.as_str()))
        .transpose()?
        .map(|bcs| bcs::from_bytes::<MovePackage>(&bcs))
        .transpose()?)
}
/// Get a page of packages published within the optional
/// (`after_checkpoint`, `before_checkpoint`) range.
pub async fn packages(
&self,
pagination_filter: PaginationFilter,
after_checkpoint: Option<u64>,
before_checkpoint: Option<u64>,
) -> Result<Page<MovePackage>> {
let (after, before, first, last) = self.pagination_filter(pagination_filter).await;
let operation = PackagesQuery::build(PackagesQueryArgs {
after: after.as_deref(),
before: before.as_deref(),
first,
last,
filter: Some(PackageCheckpointFilter {
after_checkpoint,
before_checkpoint,
}),
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
if let Some(packages) = response.data {
let pc = packages.packages;
let page_info = pc.page_info;
// Nodes without BCS payloads are skipped; decode failures fail the page.
let bcs = pc
.nodes
.iter()
.map(|p| &p.package_bcs)
.filter_map(|b64| {
b64.as_ref()
.map(|b| base64ct::Base64::decode_vec(b.0.as_str()))
})
.collect::<Result<Vec<_>, base64ct::Error>>()?;
let packages = bcs
.iter()
.map(|b| bcs::from_bytes::<MovePackage>(b))
.collect::<Result<Vec<_>, bcs::Error>>()?;
Ok(Page::new(page_info, packages))
} else {
Ok(Page::new_empty())
}
}
/// Dry-run a full transaction (BCS-encodes and base64-wraps it for the service).
pub async fn dry_run_tx(
&self,
tx: &Transaction,
skip_checks: Option<bool>,
) -> Result<DryRunResult> {
let tx_bytes = base64ct::Base64::encode_string(&bcs::to_bytes(&tx)?);
self.dry_run(tx_bytes, skip_checks, None).await
}
/// Dry-run a bare `TransactionKind` with explicit transaction metadata
/// (presumably sender/gas information — confirm against `TransactionMetadata`).
pub async fn dry_run_tx_kind(
&self,
tx_kind: &TransactionKind,
skip_checks: Option<bool>,
tx_meta: TransactionMetadata,
) -> Result<DryRunResult> {
let tx_bytes = base64ct::Base64::encode_string(&bcs::to_bytes(&tx_kind)?);
self.dry_run(tx_bytes, skip_checks, Some(tx_meta)).await
}
/// Shared implementation for the dry-run entry points: submit base64-encoded
/// transaction bytes and unpack both the reported error and the decoded effects.
async fn dry_run(
&self,
tx_bytes: String,
skip_checks: Option<bool>,
tx_meta: Option<TransactionMetadata>,
) -> Result<DryRunResult> {
// Checks are performed by default unless explicitly skipped.
let skip_checks = skip_checks.unwrap_or(false);
let operation = DryRunQuery::build(DryRunArgs {
tx_bytes,
skip_checks,
tx_meta,
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
// A dry run can report an execution error alongside (or instead of) effects.
let error = response
.data
.as_ref()
.and_then(|tx| tx.dry_run_transaction_block.error.clone());
let effects = response
.data
.map(|tx| tx.dry_run_transaction_block)
.and_then(|tx| tx.transaction)
.and_then(|tx| tx.effects)
.and_then(|bcs| bcs.bcs)
.map(|bcs| base64ct::Base64::decode_vec(bcs.0.as_str()))
.transpose()?
.map(|bcs| bcs::from_bytes::<TransactionEffects>(&bcs))
.transpose()?;
Ok(DryRunResult { effects, error })
}
/// Get the signed transaction with the given digest.
pub async fn transaction(
&self,
digest: TransactionDigest,
) -> Result<Option<SignedTransaction>> {
let operation = TransactionBlockQuery::build(TransactionBlockArgs {
digest: digest.to_string(),
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
response
.data
.and_then(|d| d.transaction_block)
.map(|tx| tx.try_into())
.transpose()
}
pub async fn transaction_effects(
&self,
digest: TransactionDigest,
) -> Result<Option<TransactionEffects>> {
let operation = TransactionBlockEffectsQuery::build(TransactionBlockArgs {
digest: digest.to_string(),
});
let response = self.run_query(&operation).await?;
response
.data
.and_then(|d| d.transaction_block)
.map(|tx| tx.try_into())
.transpose()
}
pub async fn transaction_data_effects(
&self,
digest: TransactionDigest,
) -> Result<Option<TransactionDataEffects>> {
let operation = TransactionBlockWithEffectsQuery::build(TransactionBlockArgs {
digest: digest.to_string(),
});
let response = self.run_query(&operation).await?;
let tx = response
.data
.and_then(|d| d.transaction_block)
.map(|tx| (tx.bcs, tx.effects, tx.signatures));
match tx {
Some((Some(bcs), Some(effects), Some(sigs))) => {
let bcs = base64ct::Base64::decode_vec(bcs.0.as_str())?;
let effects = base64ct::Base64::decode_vec(effects.bcs.unwrap().0.as_str())?;
let signatures = sigs
.iter()
.map(|s| UserSignature::from_base64(&s.0))
.collect::<Result<Vec<_>, _>>()?;
let transaction: Transaction = bcs::from_bytes(&bcs)?;
let tx = SignedTransaction {
transaction,
signatures,
};
let effects: TransactionEffects = bcs::from_bytes(&effects)?;
Ok(Some(TransactionDataEffects { tx, effects }))
}
_ => Ok(None),
}
}
/// Get a page of signed transactions matching `filter`. Returns an empty page
/// when the response carries no data.
pub async fn transactions(
&self,
filter: Option<TransactionsFilter<'_>>,
pagination_filter: PaginationFilter,
) -> Result<Page<SignedTransaction>> {
let (after, before, first, last) = self.pagination_filter(pagination_filter).await;
let operation = TransactionBlocksQuery::build(TransactionBlocksQueryArgs {
after: after.as_deref(),
before: before.as_deref(),
filter,
first,
last,
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
if let Some(txb) = response.data {
let txc = txb.transaction_blocks;
let page_info = txc.page_info;
// Fail the whole page if any node cannot be converted.
let transactions = txc
.nodes
.into_iter()
.map(|n| n.try_into())
.collect::<Result<Vec<_>>>()?;
let page = Page::new(page_info, transactions);
Ok(page)
} else {
Ok(Page::new_empty())
}
}
pub async fn transactions_effects(
&self,
filter: Option<TransactionsFilter<'_>>,
pagination_filter: PaginationFilter,
) -> Result<Page<TransactionEffects>> {
let (after, before, first, last) = self.pagination_filter(pagination_filter).await;
let operation = TransactionBlocksEffectsQuery::build(TransactionBlocksQueryArgs {
after: after.as_deref(),
before: before.as_deref(),
filter,
first,
last,
});
let response = self.run_query(&operation).await?;
if let Some(txb) = response.data {
let txc = txb.transaction_blocks;
let page_info = txc.page_info;
let transactions = txc
.nodes
.into_iter()
.map(|n| n.try_into())
.collect::<Result<Vec<_>>>()?;
let page = Page::new(page_info, transactions);
Ok(page)
} else {
Ok(Page::new_empty())
}
}
pub async fn transactions_data_effects(
&self,
filter: Option<TransactionsFilter<'_>>,
pagination_filter: PaginationFilter,
) -> Result<Page<TransactionDataEffects>> {
let (after, before, first, last) = self.pagination_filter(pagination_filter).await;
let operation = TransactionBlocksWithEffectsQuery::build(TransactionBlocksQueryArgs {
after: after.as_deref(),
before: before.as_deref(),
filter,
first,
last,
});
let response = self.run_query(&operation).await?;
if let Some(errors) = response.errors {
return Err(Error::graphql_error(errors));
}
if let Some(txb) = response.data {
let txc = txb.transaction_blocks;
let page_info = txc.page_info;
let transactions = {
txc.nodes
.iter()
.map(|node| {
match (
node.bcs.as_ref(),
node.effects.as_ref(),
node.signatures.as_ref(),
) {
(Some(bcs), Some(effects), Some(sigs)) => {
let bcs = base64ct::Base64::decode_vec(bcs.0.as_str())?;
let effects = base64ct::Base64::decode_vec(
effects.bcs.as_ref().unwrap().0.as_str(),
)?;
let sigs = sigs
.iter()
.map(|s| UserSignature::from_base64(&s.0))
.collect::<Result<Vec<_>, _>>()?;
let tx: Transaction = bcs::from_bytes(&bcs)?;
let tx = SignedTransaction {
transaction: tx,
signatures: sigs,
};
let effects: TransactionEffects = bcs::from_bytes(&effects)?;
Ok(TransactionDataEffects { tx, effects })
}
(_, _, _) => Err(Error::empty_response_error()),
}
})
.collect::<Result<Vec<_>>>()?
};
let page = Page::new(page_info, transactions);
Ok(page)
} else {
Ok(Page::new_empty())
}
}
/// Stream [`SignedTransaction`]s matching the optional `filter`, walking
/// pages in `streaming_direction`.
pub async fn transactions_stream<'a>(
    &'a self,
    filter: Option<TransactionsFilter<'a>>,
    streaming_direction: Direction,
) -> impl Stream<Item = Result<SignedTransaction>> + 'a {
    // Each page request re-uses the caller's filter.
    stream_paginated_query(
        move |page| self.transactions(filter.clone(), page),
        streaming_direction,
    )
}
/// Stream [`TransactionEffects`] for transactions matching the optional
/// `filter`, walking pages in `streaming_direction`.
pub async fn transactions_effects_stream<'a>(
    &'a self,
    filter: Option<TransactionsFilter<'a>>,
    streaming_direction: Direction,
) -> impl Stream<Item = Result<TransactionEffects>> + 'a {
    // Each page request re-uses the caller's filter.
    stream_paginated_query(
        move |page| self.transactions_effects(filter.clone(), page),
        streaming_direction,
    )
}
/// Execute a transaction with the provided `signatures` and return its
/// effects, or `Ok(None)` when the response carries no data.
///
/// Returns an error on GraphQL errors, on serialization failure of `tx`, or
/// on a malformed effects payload.
pub async fn execute_tx(
    &self,
    signatures: Vec<UserSignature>,
    tx: &Transaction,
) -> Result<Option<TransactionEffects>> {
    let operation = ExecuteTransactionQuery::build(ExecuteTransactionArgs {
        signatures: signatures.iter().map(|s| s.to_base64()).collect(),
        // Propagate serialization failure instead of panicking (was `unwrap`);
        // `?` works because the error type already converts from `bcs::Error`.
        tx_bytes: base64ct::Base64::encode_string(&bcs::to_bytes(tx)?),
    });
    let response = self.run_query(&operation).await?;
    if let Some(errors) = response.errors {
        return Err(Error::graphql_error(errors));
    }
    if let Some(data) = response.data {
        let result = data.execute_transaction_block;
        let bcs = base64ct::Base64::decode_vec(result.effects.bcs.0.as_str())?;
        let effects: TransactionEffects = bcs::from_bytes(&bcs)?;
        Ok(Some(effects))
    } else {
        Ok(None)
    }
}
/// Fetch the normalized representation of the Move function
/// `package::module::function`, optionally pinned to a package `version`.
///
/// Returns `Ok(None)` when the package, module, or function does not exist;
/// an error on an invalid `package` address or GraphQL errors.
pub async fn normalized_move_function(
    &self,
    package: &str,
    module: &str,
    function: &str,
    version: Option<u64>,
) -> Result<Option<MoveFunction>> {
    let args = NormalizedMoveFunctionQueryArgs {
        address: Address::from_str(package)?,
        module,
        function,
        version,
    };
    let response = self
        .run_query(&NormalizedMoveFunctionQuery::build(args))
        .await?;
    if let Some(errors) = response.errors {
        return Err(Error::graphql_error(errors));
    }
    // Each level of the response is optional; flatten down to the function.
    let func = response
        .data
        .and_then(|data| data.package)
        .and_then(|package| package.module)
        .and_then(|module| module.function);
    Ok(func)
}
/// Fetch the normalized representation of the Move module `package::module`,
/// optionally pinned to a package `version`.
///
/// Each of the module's four nested connections (enums, friends, functions,
/// structs) is paginated independently via its own [`PaginationFilter`].
///
/// Returns `Ok(None)` when the package or module does not exist; an error on
/// an invalid `package` address or GraphQL errors.
#[allow(clippy::too_many_arguments)]
pub async fn normalized_move_module(
    &self,
    package: &str,
    module: &str,
    version: Option<u64>,
    pagination_filter_enums: PaginationFilter,
    pagination_filter_friends: PaginationFilter,
    pagination_filter_functions: PaginationFilter,
    pagination_filter_structs: PaginationFilter,
) -> Result<Option<MoveModule>> {
    // Resolve each caller-supplied filter into (after, before, first, last)
    // cursor tuples — one set per nested connection.
    let (after_enums, before_enums, first_enums, last_enums) =
        self.pagination_filter(pagination_filter_enums).await;
    let (after_friends, before_friends, first_friends, last_friends) =
        self.pagination_filter(pagination_filter_friends).await;
    let (after_functions, before_functions, first_functions, last_functions) =
        self.pagination_filter(pagination_filter_functions).await;
    let (after_structs, before_structs, first_structs, last_structs) =
        self.pagination_filter(pagination_filter_structs).await;
    let operation = NormalizedMoveModuleQuery::build(NormalizedMoveModuleQueryArgs {
        package: Address::from_str(package)?,
        module,
        version,
        after_enums: after_enums.as_deref(),
        after_functions: after_functions.as_deref(),
        after_structs: after_structs.as_deref(),
        after_friends: after_friends.as_deref(),
        before_enums: before_enums.as_deref(),
        before_functions: before_functions.as_deref(),
        before_structs: before_structs.as_deref(),
        before_friends: before_friends.as_deref(),
        first_enums,
        first_functions,
        first_structs,
        first_friends,
        last_enums,
        last_functions,
        last_structs,
        last_friends,
    });
    let response = self.run_query(&operation).await?;
    if let Some(errors) = response.errors {
        return Err(Error::graphql_error(errors));
    }
    // Both the package and module levels are optional in the response.
    Ok(response.data.and_then(|p| p.package).and_then(|p| p.module))
}
/// Resolve a SuiNS `domain` name to its registered [`Address`], if any.
pub async fn resolve_suins_to_address(&self, domain: &str) -> Result<Option<Address>> {
    let operation = ResolveSuinsQuery::build(ResolveSuinsQueryArgs { name: domain });
    let response = self.run_query(&operation).await?;
    if let Some(errors) = response.errors {
        return Err(Error::graphql_error(errors));
    }
    // Unregistered domains come back with no resolution record.
    let resolved = response
        .data
        .and_then(|data| data.resolve_suins_address)
        .map(|record| record.address);
    Ok(resolved)
}
/// Look up the default SuiNS name registered for `address`, if any.
pub async fn default_suins_name(&self, address: Address) -> Result<Option<String>> {
    let operation = DefaultSuinsNameQuery::build(DefaultSuinsNameQueryArgs { address });
    let response = self.run_query(&operation).await?;
    if let Some(errors) = response.errors {
        return Err(Error::graphql_error(errors));
    }
    // Addresses without a registered default name yield `None`.
    let name = response
        .data
        .and_then(|data| data.address)
        .and_then(|addr| addr.default_suins_name);
    Ok(name)
}
}
#[cfg(test)]
mod tests {
    //! Integration tests for the GraphQL client. Most run against the network
    //! selected by the `NETWORK` env var (default: local) and require a
    //! reachable node; `test_coins_stream` additionally requires a faucet.
    use base64ct::Encoding;
    use futures::StreamExt;
    use sui_types::Ed25519PublicKey;
    use sui_types::TypeTag;
    use crate::faucet::FaucetClient;
    use crate::BcsName;
    use crate::Client;
    use crate::Direction;
    use crate::PaginationFilter;
    use crate::DEVNET_HOST;
    use crate::LOCAL_HOST;
    use crate::MAINNET_HOST;
    use crate::TESTNET_HOST;
    use tokio::time;
    // Number of coins a single faucet request is expected to deliver.
    const NUM_COINS_FROM_FAUCET: usize = 5;
    /// Build a client for the network named by the `NETWORK` env var
    /// (default `local`); any other value is treated as a custom URL.
    fn test_client() -> Client {
        let network = std::env::var("NETWORK").unwrap_or_else(|_| "local".to_string());
        match network.as_str() {
            "mainnet" => Client::new_mainnet(),
            "testnet" => Client::new_testnet(),
            "devnet" => Client::new_devnet(),
            "local" => Client::new_localhost(),
            // `expect` does not interpolate `{network}` — it would print the
            // braces literally — so use `panic!`, which does.
            _ => Client::new(&network)
                .unwrap_or_else(|e| panic!("Invalid network URL {network}: {e}")),
        }
    }
    #[test]
    fn test_rpc_server() {
        let mut client = Client::new_mainnet();
        assert_eq!(client.rpc_server(), MAINNET_HOST);
        client.set_rpc_server(TESTNET_HOST).unwrap();
        assert_eq!(client.rpc_server(), TESTNET_HOST);
        client.set_rpc_server(DEVNET_HOST).unwrap();
        assert_eq!(client.rpc_server(), DEVNET_HOST);
        client.set_rpc_server(LOCAL_HOST).unwrap();
        assert_eq!(client.rpc_server(), LOCAL_HOST);
        // A host without a scheme is accepted; a bare port/path is not.
        assert!(client.set_rpc_server("localhost:9125/graphql").is_ok());
        assert!(client.set_rpc_server("9125/graphql").is_err());
    }
    #[tokio::test]
    async fn test_balance_query() {
        let client = test_client();
        let balance = client.balance("0x1".parse().unwrap(), None).await;
        assert!(
            balance.is_ok(),
            "Balance query failed for {} network",
            client.rpc_server()
        );
    }
    #[tokio::test]
    async fn test_chain_id() {
        let client = test_client();
        let chain_id = client.chain_id().await;
        assert!(chain_id.is_ok());
    }
    #[tokio::test]
    async fn test_reference_gas_price_query() {
        let client = test_client();
        let rgp = client.reference_gas_price(None).await;
        assert!(
            rgp.is_ok(),
            "Reference gas price query failed for {} network",
            client.rpc_server()
        );
    }
    #[tokio::test]
    async fn test_protocol_config_query() {
        let client = test_client();
        let pc = client.protocol_config(None).await;
        assert!(pc.is_ok());
        // Requesting a specific version should echo that version back.
        let pc = client.protocol_config(Some(50)).await;
        assert!(pc.is_ok());
        let pc = pc.unwrap();
        if let Some(pc) = pc {
            assert_eq!(
                pc.protocol_version,
                50,
                "Protocol version query mismatch for {} network. Expected: 50, received: {}",
                client.rpc_server(),
                pc.protocol_version
            );
        }
    }
    #[tokio::test]
    async fn test_service_config_query() {
        let client = test_client();
        let sc = client.service_config().await;
        assert!(
            sc.is_ok(),
            "Service config query failed for {} network",
            client.rpc_server()
        );
    }
    #[tokio::test]
    async fn test_active_validators() {
        let client = test_client();
        let av = client
            .active_validators(None, PaginationFilter::default())
            .await;
        assert!(
            av.is_ok(),
            "Active validators query failed for {} network. Error: {}",
            client.rpc_server(),
            av.unwrap_err()
        );
        assert!(
            !av.unwrap().is_empty(),
            "Active validators query returned None for {} network",
            client.rpc_server()
        );
    }
    #[tokio::test]
    async fn test_coin_metadata_query() {
        let client = test_client();
        let cm = client.coin_metadata("0x2::sui::SUI").await;
        assert!(
            cm.is_ok(),
            "Coin metadata query failed for {} network",
            client.rpc_server()
        );
    }
    #[tokio::test]
    async fn test_checkpoint_query() {
        let client = test_client();
        let c = client.checkpoint(None, None).await;
        assert!(
            c.is_ok(),
            "Checkpoint query failed for {} network. Error: {}",
            client.rpc_server(),
            c.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_checkpoints_query() {
        let client = test_client();
        let c = client.checkpoints(PaginationFilter::default()).await;
        assert!(
            c.is_ok(),
            "Checkpoints query failed for {} network. Error: {}",
            client.rpc_server(),
            c.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_latest_checkpoint_sequence_number_query() {
        let client = test_client();
        let last_checkpoint = client.latest_checkpoint_sequence_number().await;
        assert!(
            last_checkpoint.is_ok(),
            "Latest checkpoint sequence number query failed for {} network. Error: {}",
            client.rpc_server(),
            last_checkpoint.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_epoch_query() {
        let client = test_client();
        let e = client.epoch(None).await;
        assert!(
            e.is_ok(),
            "Epoch query failed for {} network. Error: {}",
            client.rpc_server(),
            e.unwrap_err()
        );
        assert!(
            e.unwrap().is_some(),
            "Epoch query returned None for {} network",
            client.rpc_server()
        );
    }
    #[tokio::test]
    async fn test_epoch_total_checkpoints_query() {
        let client = test_client();
        let e = client.epoch_total_checkpoints(None).await;
        assert!(
            e.is_ok(),
            "Epoch total checkpoints query failed for {} network. Error: {}",
            client.rpc_server(),
            e.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_epoch_total_transaction_blocks_query() {
        let client = test_client();
        let e = client.epoch_total_transaction_blocks(None).await;
        assert!(
            e.is_ok(),
            "Epoch total transaction blocks query failed for {} network. Error: {}",
            client.rpc_server(),
            e.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_epoch_summary_query() {
        let client = test_client();
        let e = client.epoch_summary(None).await;
        assert!(
            e.is_ok(),
            "Epoch summary query failed for {} network. Error: {}",
            client.rpc_server(),
            e.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_events_query() {
        let client = test_client();
        let events = client.events(None, PaginationFilter::default()).await;
        assert!(
            events.is_ok(),
            "Events query failed for {} network. Error: {}",
            client.rpc_server(),
            events.unwrap_err()
        );
        assert!(
            !events.unwrap().is_empty(),
            "Events query returned no data for {} network",
            client.rpc_server()
        );
    }
    #[tokio::test]
    async fn test_objects_query() {
        let client = test_client();
        let objects = client.objects(None, PaginationFilter::default()).await;
        assert!(
            objects.is_ok(),
            "Objects query failed for {} network. Error: {}",
            client.rpc_server(),
            objects.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_object_query() {
        let client = test_client();
        // 0x5 is a system object that exists on every network.
        let object = client.object("0x5".parse().unwrap(), None).await;
        assert!(
            object.is_ok(),
            "Object query failed for {} network. Error: {}",
            client.rpc_server(),
            object.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_object_bcs_query() {
        let client = test_client();
        let object_bcs = client.object_bcs("0x5".parse().unwrap()).await;
        assert!(
            object_bcs.is_ok(),
            "Object bcs query failed for {} network. Error: {}",
            client.rpc_server(),
            object_bcs.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_coins_query() {
        let client = test_client();
        let coins = client
            .coins("0x1".parse().unwrap(), None, PaginationFilter::default())
            .await;
        assert!(
            coins.is_ok(),
            "Coins query failed for {} network. Error: {}",
            client.rpc_server(),
            coins.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_coins_stream() {
        let client = test_client();
        let faucet = match client.rpc_server() {
            LOCAL_HOST => FaucetClient::local(),
            TESTNET_HOST => FaucetClient::testnet(),
            DEVNET_HOST => FaucetClient::devnet(),
            // No faucet for mainnet/custom networks; nothing to test.
            _ => return,
        };
        let key = Ed25519PublicKey::generate(rand::thread_rng());
        let address = key.derive_address();
        faucet.request_and_wait(address).await.unwrap();
        const MAX_RETRIES: u32 = 10;
        const RETRY_DELAY: time::Duration = time::Duration::from_secs(1);
        let mut num_coins = 0;
        for attempt in 0..MAX_RETRIES {
            // Recount from scratch on every attempt so a retry is not
            // inflated by coins counted during earlier attempts.
            num_coins = 0;
            let mut stream = client
                .coins_stream(address, None, Direction::default())
                .await;
            let mut stream_failed = false;
            while let Some(result) = stream.next().await {
                match result {
                    Ok(_) => num_coins += 1,
                    Err(_) => {
                        stream_failed = true;
                        break;
                    }
                }
            }
            if !stream_failed {
                // Stream completed cleanly; stop retrying so the count
                // reflects exactly one full pass.
                break;
            }
            if attempt < MAX_RETRIES - 1 {
                time::sleep(RETRY_DELAY).await;
            }
        }
        assert!(num_coins >= NUM_COINS_FROM_FAUCET);
    }
    #[tokio::test]
    async fn test_transaction_effects_query() {
        let client = test_client();
        // Grab any recent transaction, then fetch its effects by digest.
        let transactions = client
            .transactions(None, PaginationFilter::default())
            .await
            .unwrap();
        let tx_digest = transactions.data()[0].transaction.digest();
        let effects = client.transaction_effects(tx_digest).await.unwrap();
        assert!(
            effects.is_some(),
            "Transaction effects query failed for {} network.",
            client.rpc_server(),
        );
    }
    #[tokio::test]
    async fn test_transactions_effects_query() {
        let client = test_client();
        let txs_effects = client
            .transactions_effects(None, PaginationFilter::default())
            .await;
        assert!(
            txs_effects.is_ok(),
            "Transactions effects query failed for {} network. Error: {}",
            client.rpc_server(),
            txs_effects.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_transactions_query() {
        let client = test_client();
        let transactions = client.transactions(None, PaginationFilter::default()).await;
        assert!(
            transactions.is_ok(),
            "Transactions query failed for {} network. Error: {}",
            client.rpc_server(),
            transactions.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_total_supply() {
        let client = test_client();
        let ts = client.total_supply("0x2::sui::SUI").await;
        assert!(
            ts.is_ok(),
            "Total supply query failed for {} network. Error: {}",
            client.rpc_server(),
            ts.unwrap_err()
        );
        assert_eq!(
            ts.unwrap().unwrap(),
            10_000_000_000,
            "Total supply mismatch for {} network",
            client.rpc_server()
        );
    }
    #[tokio::test]
    async fn test_dry_run() {
        let client = Client::new_testnet();
        // Pre-serialized, base64-encoded transaction for testnet.
        let tx_bytes = "AAACAAiA8PoCAAAAAAAg7q6yDns6nPznaKLd9pUD2K6NFiiibC10pDVQHJKdP2kCAgABAQAAAQECAAABAQBGLuHCJ/xjZfhC4vTJt/Zrvq1gexKLaKf3aVzyIkxRaAFUHzz8ftiZdY25qP4f9zySuT1K/qyTWjbGiTu0i0Z1ZFA4gwUAAAAAILeG86EeQm3qY3ajat3iUnY2Gbrk/NbdwV/d9MZviAwwRi7hwif8Y2X4QuL0ybf2a76tYHsSi2in92lc8iJMUWjoAwAAAAAAAECrPAAAAAAAAA==";
        let dry_run = client.dry_run(tx_bytes.to_string(), None, None).await;
        assert!(dry_run.is_ok());
    }
    #[tokio::test]
    async fn test_dynamic_field_query() {
        let client = test_client();
        // Lookup by raw BCS-encoded name ...
        let bcs = base64ct::Base64::decode_vec("AgAAAAAAAAA=").unwrap();
        let dynamic_field = client
            .dynamic_field("0x5".parse().unwrap(), TypeTag::U64, BcsName(bcs))
            .await;
        assert!(dynamic_field.is_ok());
        // ... and by a plain value that gets serialized for us.
        let dynamic_field = client
            .dynamic_field("0x5".parse().unwrap(), TypeTag::U64, 2u64)
            .await;
        assert!(dynamic_field.is_ok());
    }
    #[tokio::test]
    async fn test_dynamic_fields_query() {
        let client = test_client();
        let dynamic_fields = client
            .dynamic_fields("0x5".parse().unwrap(), PaginationFilter::default())
            .await;
        assert!(
            dynamic_fields.is_ok(),
            "Dynamic fields query failed for {} network. Error: {}",
            client.rpc_server(),
            dynamic_fields.unwrap_err()
        );
    }
    #[tokio::test]
    async fn test_total_transaction_blocks() {
        let client = test_client();
        let total_transaction_blocks = client.total_transaction_blocks().await;
        assert!(
            total_transaction_blocks
                .as_ref()
                .is_ok_and(|f| f.is_some_and(|tx| tx > 0)),
            "Total transaction blocks query failed for {} network. Error: {}",
            client.rpc_server(),
            total_transaction_blocks.unwrap_err()
        );
        let chckp = client.latest_checkpoint_sequence_number().await;
        assert!(
            chckp.is_ok(),
            "Latest checkpoint sequence number query failed for {} network. Error: {}",
            client.rpc_server(),
            chckp.unwrap_err()
        );
        let chckp_id = chckp.unwrap().unwrap();
        let total_transaction_blocks = client
            .total_transaction_blocks_by_seq_num(chckp_id)
            .await
            .unwrap()
            .unwrap();
        assert!(total_transaction_blocks > 0);
        // The count looked up by checkpoint digest must agree with the count
        // looked up by sequence number for the same checkpoint.
        let chckp = client
            .checkpoint(None, Some(chckp_id))
            .await
            .unwrap()
            .unwrap();
        let digest = chckp.digest();
        let total_transaction_blocks_by_digest =
            client.total_transaction_blocks_by_digest(digest).await;
        assert!(total_transaction_blocks_by_digest.is_ok());
        assert_eq!(
            total_transaction_blocks_by_digest.unwrap().unwrap(),
            total_transaction_blocks
        );
    }
    #[tokio::test]
    async fn test_package() {
        let client = test_client();
        let package = client.package("0x2".parse().unwrap(), None).await;
        assert!(
            package.is_ok(),
            "Package query failed for {} network. Error: {}",
            client.rpc_server(),
            package.unwrap_err()
        );
        assert!(
            package.unwrap().is_some(),
            "Package query returned None for {} network",
            client.rpc_server()
        );
    }
    #[tokio::test]
    #[ignore]
    async fn test_package_by_name() {
        let client = Client::new_testnet();
        let package = client.package_by_name("sui@sui").await;
        assert!(package.is_ok());
    }
    #[tokio::test]
    async fn test_latest_package_query() {
        let client = test_client();
        let package = client.package_latest("0x2".parse().unwrap()).await;
        assert!(
            package.is_ok(),
            "Latest package query failed for {} network. Error: {}",
            client.rpc_server(),
            package.unwrap_err()
        );
        assert!(
            package.unwrap().is_some(),
            "Latest package for 0x2 query returned None for {} network",
            client.rpc_server()
        );
    }
    #[tokio::test]
    async fn test_packages_query() {
        let client = test_client();
        let packages = client
            .packages(PaginationFilter::default(), None, None)
            .await;
        assert!(
            packages.is_ok(),
            "Packages query failed for {} network. Error: {}",
            client.rpc_server(),
            packages.unwrap_err()
        );
        assert!(
            !packages.unwrap().is_empty(),
            "Packages query returned no data for {} network",
            client.rpc_server()
        );
    }
}