// sui_rpc_benchmark/direct/metrics.rs

// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

//! This module defines data structures and functions for collecting
//! and summarizing performance metrics from benchmark queries.
//! It tracks both overall and per-table counts, errors and average latencies.
use dashmap::DashMap;
use std::sync::Arc;
use std::time::Duration;
use tracing::debug;

use super::query_template_generator::QueryTemplate;

14#[derive(Debug, Default)]
15pub struct QueryMetrics {
16    pub latency_ms: Vec<f64>,
17    pub errors: usize,
18    pub total_queries: usize,
19}
20
21#[derive(Debug)]
22pub struct BenchmarkResult {
23    pub total_queries: usize,
24    pub total_errors: usize,
25    pub avg_latency_ms: f64,
26    pub table_stats: Vec<TableStats>,
27}
28
29#[derive(Debug)]
30pub struct TableStats {
31    pub table_name: String,
32    pub queries: usize,
33    pub errors: usize,
34    pub avg_latency_ms: f64,
35}
36
37#[derive(Clone, Default)]
38pub struct MetricsCollector {
39    metrics: Arc<DashMap<String, QueryMetrics>>,
40}
41
42impl MetricsCollector {
43    /// Records a query execution with its latency and error status
44    ///
45    /// # Arguments
46    /// * `query_template` - The QueryTemplate being recorded
47    /// * `latency` - The duration taken to execute the query
48    /// * `is_error` - Whether the query resulted in an error
49    pub fn record_query(&self, query_template: QueryTemplate, latency: Duration, is_error: bool) {
50        let mut entry = self
51            .metrics
52            .entry(query_template.table_name.to_string())
53            .or_default();
54
55        entry.total_queries += 1;
56        if is_error {
57            entry.errors += 1;
58            debug!("Error executing query: {:?}", query_template);
59        } else {
60            entry.latency_ms.push(latency.as_secs_f64() * 1000.0);
61        }
62    }
63
64    pub fn generate_report(&self) -> BenchmarkResult {
65        let mut total_queries = 0;
66        let mut total_errors = 0;
67        let mut total_latency = 0.0;
68        let mut total_successful = 0;
69        let mut table_stats = Vec::new();
70
71        for entry in self.metrics.iter() {
72            let table_name = entry.key().clone();
73            let metrics = entry.value();
74            let successful = metrics.total_queries - metrics.errors;
75            let avg_latency = if successful > 0 {
76                metrics.latency_ms.iter().sum::<f64>() / successful as f64
77            } else {
78                0.0
79            };
80
81            table_stats.push(TableStats {
82                table_name,
83                queries: metrics.total_queries,
84                errors: metrics.errors,
85                avg_latency_ms: avg_latency,
86            });
87
88            total_queries += metrics.total_queries;
89            total_errors += metrics.errors;
90            total_latency += metrics.latency_ms.iter().sum::<f64>();
91            total_successful += successful;
92        }
93
94        table_stats.sort_by(|a, b| b.queries.cmp(&a.queries));
95
96        BenchmarkResult {
97            total_queries,
98            total_errors,
99            avg_latency_ms: if total_successful > 0 {
100                total_latency / total_successful as f64
101            } else {
102                0.0
103            },
104            table_stats,
105        }
106    }
107}