// sui_adapter_latest/temporary_store.rs

1// Copyright (c) Mysten Labs, Inc.
2// SPDX-License-Identifier: Apache-2.0
3
4use crate::gas_charger::{GasCharger, PaymentLocation};
5use mysten_common::{ZipDebugEqIteratorExt, debug_fatal};
6use mysten_metrics::monitored_scope;
7use parking_lot::RwLock;
8use std::collections::{BTreeMap, BTreeSet, HashSet};
9use sui_protocol_config::ProtocolConfig;
10use sui_types::accumulator_event::AccumulatorEvent;
11use sui_types::accumulator_root::AccumulatorObjId;
12use sui_types::base_types::VersionDigest;
13use sui_types::committee::EpochId;
14use sui_types::deny_list_v2::check_coin_deny_list_v2_during_execution;
15use sui_types::effects::{
16    AccumulatorOperation, AccumulatorValue, AccumulatorWriteV1, TransactionEffects,
17    TransactionEvents,
18};
19use sui_types::execution::{
20    DynamicallyLoadedObjectMetadata, ExecutionResults, ExecutionResultsV2, SharedInput,
21};
22use sui_types::execution_status::{ExecutionErrorKind, ExecutionStatus};
23use sui_types::inner_temporary_store::InnerTemporaryStore;
24use sui_types::layout_resolver::LayoutResolver;
25use sui_types::object::Data;
26use sui_types::storage::{BackingStore, DenyListResult, PackageObject};
27use sui_types::sui_system_state::{AdvanceEpochParams, get_sui_system_state_wrapper};
28use sui_types::{
29    SUI_DENY_LIST_OBJECT_ID,
30    base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress, TransactionDigest},
31    effects::EffectsObjectChange,
32    error::{ExecutionError, SuiResult},
33    gas::GasCostSummary,
34    object::Object,
35    object::Owner,
36    storage::{BackingPackageStore, ChildObjectResolver, ParentSync, Storage},
37    transaction::InputObjects,
38};
39use sui_types::{SUI_SYSTEM_STATE_OBJECT_ID, TypeTag, is_system_package};
40
/// Per-transaction scratch space: accumulates reads, writes, deletes, and
/// events during execution, before they are finalized into an
/// `InnerTemporaryStore` and `TransactionEffects`.
pub struct TemporaryStore<'backing> {
    // The backing store for retrieving Move packages onchain.
    // When executing a Move call, the dependent packages are not going to be
    // in the input objects. They will be fetched from the backing store.
    // Also used for fetching the backing parent_sync to get the last known version for wrapped
    // objects
    store: &'backing dyn BackingStore,
    /// Digest of the transaction being executed. Written objects are stamped
    /// with this as their `previous_transaction`.
    tx_digest: TransactionDigest,
    /// All input objects of the transaction, keyed by ID.
    input_objects: BTreeMap<ObjectID, Object>,

    /// Store the original versions of the non-exclusive write inputs, in order to detect
    /// mutations (which are illegal, but not prevented by the type system).
    non_exclusive_input_original_versions: BTreeMap<ObjectID, Object>,

    stream_ended_consensus_objects: BTreeMap<ObjectID, SequenceNumber /* start_version */>,
    /// The version to assign to all objects written by the transaction using this store.
    lamport_timestamp: SequenceNumber,
    /// Inputs that will be mutated by the transaction. Does not include NonExclusiveWrite inputs,
    /// which can be taken as `&mut T` but cannot be directly mutated.
    mutable_input_refs: BTreeMap<ObjectID, (VersionDigest, Owner)>,
    /// Running results of execution: written/deleted/created objects, events, etc.
    execution_results: ExecutionResultsV2,
    /// Objects that were loaded during execution (dynamic fields + received objects).
    loaded_runtime_objects: BTreeMap<ObjectID, DynamicallyLoadedObjectMetadata>,
    /// A map from wrapped object to its container. Used during expensive invariant checks.
    wrapped_object_containers: BTreeMap<ObjectID, ObjectID>,
    /// Protocol config used during execution.
    protocol_config: &'backing ProtocolConfig,

    /// Every package that was loaded from DB store during execution.
    /// These packages were not previously loaded into the temporary store.
    runtime_packages_loaded_from_db: RwLock<BTreeMap<ObjectID, PackageObject>>,

    /// The set of objects that we may receive during execution. Not guaranteed to receive all, or
    /// any of the objects referenced in this set.
    receiving_objects: Vec<ObjectRef>,

    /// The set of all generated object IDs from the object runtime during the transaction. This includes any
    /// created-and-then-deleted objects in addition to any `new_ids` which contains only the set
    /// of created (but not deleted) IDs in the transaction.
    generated_runtime_ids: BTreeSet<ObjectID>,

    // TODO: Now that we track epoch here, there are a few places we don't need to pass it around.
    /// The current epoch.
    cur_epoch: EpochId,

    /// The set of per-epoch config objects that were loaded during execution, and are not in the
    /// input objects. This allows us to commit them to the effects.
    loaded_per_epoch_config_objects: RwLock<BTreeSet<ObjectID>>,

    /// Index ranges into `execution_results.accumulator_events` for events emitted from PTB
    /// (Move) execution. Each `record_execution_results` call appends a contiguous range
    /// bracketing the merge. Any index outside these ranges was emitted by the runtime outside
    /// of PTB execution (currently only `gas_charger`'s `add_accumulator_event`). Consumed by
    /// `check_sui_address_balance_changes` inside `run_conservation_checks` to gate
    /// non-PTB-emitted events behind input reservations. Cleared on `drop_writes` since the
    /// underlying events are also cleared. A `Vec` is sufficient because real transactions run
    /// the PTB at most a handful of times.
    ptb_emitted_accumulator_event_ranges: Vec<std::ops::Range<usize>>,
}
99
100impl<'backing> TemporaryStore<'backing> {
101    /// Creates a new store associated with an authority store, and populates it with
102    /// initial objects.
103    pub fn new(
104        store: &'backing dyn BackingStore,
105        input_objects: InputObjects,
106        receiving_objects: Vec<ObjectRef>,
107        tx_digest: TransactionDigest,
108        protocol_config: &'backing ProtocolConfig,
109        cur_epoch: EpochId,
110    ) -> Self {
111        let mutable_input_refs = input_objects.exclusive_mutable_inputs();
112        let non_exclusive_input_original_versions = input_objects.non_exclusive_input_objects();
113
114        let lamport_timestamp = input_objects.lamport_timestamp(&receiving_objects);
115        let stream_ended_consensus_objects = input_objects.consensus_stream_ended_objects();
116        let objects = input_objects.into_object_map();
117        #[cfg(debug_assertions)]
118        {
119            // Ensure that input objects and receiving objects must not overlap.
120            assert!(
121                objects
122                    .keys()
123                    .collect::<HashSet<_>>()
124                    .intersection(
125                        &receiving_objects
126                            .iter()
127                            .map(|oref| &oref.0)
128                            .collect::<HashSet<_>>()
129                    )
130                    .next()
131                    .is_none()
132            );
133        }
134        Self {
135            store,
136            tx_digest,
137            input_objects: objects,
138            non_exclusive_input_original_versions,
139            stream_ended_consensus_objects,
140            lamport_timestamp,
141            mutable_input_refs,
142            execution_results: ExecutionResultsV2::default(),
143            protocol_config,
144            loaded_runtime_objects: BTreeMap::new(),
145            wrapped_object_containers: BTreeMap::new(),
146            runtime_packages_loaded_from_db: RwLock::new(BTreeMap::new()),
147            receiving_objects,
148            generated_runtime_ids: BTreeSet::new(),
149            cur_epoch,
150            loaded_per_epoch_config_objects: RwLock::new(BTreeSet::new()),
151            ptb_emitted_accumulator_event_ranges: Vec::new(),
152        }
153    }
154
    // Helpers to access private fields
    /// Returns all input objects of the transaction, keyed by ID.
    pub fn objects(&self) -> &BTreeMap<ObjectID, Object> {
        &self.input_objects
    }
159
    /// Stamps every written object with the store's lamport version and this
    /// transaction's digest, then (in debug builds only) validates internal
    /// invariants.
    pub fn update_object_version_and_prev_tx(&mut self) {
        self.execution_results.update_version_and_previous_tx(
            self.lamport_timestamp,
            self.tx_digest,
            &self.input_objects,
            self.protocol_config.reshare_at_same_initial_version(),
        );

        #[cfg(debug_assertions)]
        {
            self.check_invariants();
        }
    }
173
174    fn calculate_accumulator_running_max_withdraws(&self) -> BTreeMap<AccumulatorObjId, u128> {
175        let mut running_net_withdraws: BTreeMap<AccumulatorObjId, i128> = BTreeMap::new();
176        let mut running_max_withdraws: BTreeMap<AccumulatorObjId, u128> = BTreeMap::new();
177        for event in &self.execution_results.accumulator_events {
178            match &event.write.value {
179                AccumulatorValue::Integer(amount) => match event.write.operation {
180                    AccumulatorOperation::Split => {
181                        let entry = running_net_withdraws
182                            .entry(event.accumulator_obj)
183                            .or_default();
184                        *entry += *amount as i128;
185                        if *entry > 0 {
186                            let max_entry = running_max_withdraws
187                                .entry(event.accumulator_obj)
188                                .or_default();
189                            *max_entry = (*max_entry).max(*entry as u128);
190                        }
191                    }
192                    AccumulatorOperation::Merge => {
193                        let entry = running_net_withdraws
194                            .entry(event.accumulator_obj)
195                            .or_default();
196                        *entry -= *amount as i128;
197                    }
198                },
199                AccumulatorValue::IntegerTuple(_, _) | AccumulatorValue::EventDigest(_) => {}
200            }
201        }
202        running_max_withdraws
203    }
204
205    /// Ensure that there is one entry for each accumulator object in the accumulator events.
206    fn merge_accumulator_events(&mut self) {
207        self.execution_results.accumulator_events = self
208            .execution_results
209            .accumulator_events
210            .iter()
211            .fold(
212                BTreeMap::<AccumulatorObjId, Vec<AccumulatorWriteV1>>::new(),
213                |mut map, event| {
214                    map.entry(event.accumulator_obj)
215                        .or_default()
216                        .push(event.write.clone());
217                    map
218                },
219            )
220            .into_iter()
221            .map(|(obj_id, writes)| {
222                AccumulatorEvent::new(obj_id, AccumulatorWriteV1::merge(writes))
223            })
224            .collect();
225    }
226
    /// Break up the structure and return its internal stores (objects, active_inputs, written, deleted)
    ///
    /// `accumulator_running_max_withdraws` must be computed by the caller
    /// before accumulator events are merged (see `into_effects`).
    pub fn into_inner(
        self,
        accumulator_running_max_withdraws: BTreeMap<AccumulatorObjId, u128>,
    ) -> InnerTemporaryStore {
        let results = self.execution_results;
        InnerTemporaryStore {
            input_objects: self.input_objects,
            stream_ended_consensus_objects: self.stream_ended_consensus_objects,
            mutable_inputs: self.mutable_input_refs,
            written: results.written_objects,
            events: TransactionEvents {
                data: results.user_events,
            },
            accumulator_events: results.accumulator_events,
            loaded_runtime_objects: self.loaded_runtime_objects,
            runtime_packages_loaded_from_db: self.runtime_packages_loaded_from_db.into_inner(),
            lamport_version: self.lamport_timestamp,
            binary_config: self.protocol_config.binary_config(None),
            accumulator_running_max_withdraws,
        }
    }
249
250    /// For every object from active_inputs (i.e. all mutable objects), if they are not
251    /// mutated during the transaction execution, force mutating them by incrementing the
252    /// sequence number. This is required to achieve safety.
253    pub(crate) fn ensure_active_inputs_mutated(&mut self) {
254        let mut to_be_updated = vec![];
255        // Note: we do not mutate input objects if they are non-exclusive write
256        for id in self.mutable_input_refs.keys() {
257            if !self.execution_results.modified_objects.contains(id) {
258                // We cannot update here but have to push to `to_be_updated` and update later
259                // because the for loop is holding a reference to `self`, and calling
260                // `self.mutate_input_object` requires a mutable reference to `self`.
261                to_be_updated.push(self.input_objects[id].clone());
262            }
263        }
264        for object in to_be_updated {
265            // The object must be mutated as it was present in the input objects
266            self.mutate_input_object(object.clone());
267        }
268    }
269
    /// Builds the per-object change record that goes into effects: one entry
    /// for every object that was created, deleted, modified, or written, plus
    /// one entry per accumulator event.
    fn get_object_changes(&self) -> BTreeMap<ObjectID, EffectsObjectChange> {
        let results = &self.execution_results;
        // Union of all object IDs touched in any way by this transaction.
        let all_ids = results
            .created_object_ids
            .iter()
            .chain(&results.deleted_object_ids)
            .chain(&results.modified_objects)
            .chain(results.written_objects.keys())
            .collect::<BTreeSet<_>>();
        all_ids
            .into_iter()
            .map(|id| {
                (
                    *id,
                    EffectsObjectChange::new(
                        // Pre-state (version, digest, owner), present only if
                        // the object existed before and was modified.
                        self.get_object_modified_at(id)
                            .map(|metadata| ((metadata.version, metadata.digest), metadata.owner)),
                        results.written_objects.get(id),
                        results.created_object_ids.contains(id),
                        results.deleted_object_ids.contains(id),
                    ),
                )
            })
            .chain(results.accumulator_events.iter().cloned().map(
                |AccumulatorEvent {
                     accumulator_obj,
                     write,
                 }| {
                    (
                        *accumulator_obj.inner(),
                        EffectsObjectChange::new_from_accumulator_write(write),
                    )
                },
            ))
            // NOTE(review): collecting into a BTreeMap means an accumulator
            // entry would replace an earlier entry with the same object ID —
            // presumably accumulator object IDs never collide with the regular
            // change set; confirm.
            .collect()
    }
306
    /// Finalizes execution: consumes the store and produces the inner temporary
    /// store together with the `TransactionEffects` for this transaction.
    pub fn into_effects(
        mut self,
        shared_object_refs: Vec<SharedInput>,
        transaction_digest: &TransactionDigest,
        mut transaction_dependencies: BTreeSet<TransactionDigest>,
        gas_cost_summary: GasCostSummary,
        status: ExecutionStatus,
        gas_charger: &mut GasCharger,
        epoch: EpochId,
    ) -> (InnerTemporaryStore, TransactionEffects) {
        self.update_object_version_and_prev_tx();
        // This must happen before merge_accumulator_events, which collapses the
        // per-object event sequence that the running-max computation walks.
        let accumulator_running_max_withdraws = self.calculate_accumulator_running_max_withdraws();
        self.merge_accumulator_events();

        // Regardless of execution status (including aborts), we insert the previous transaction
        // for any successfully received objects during the transaction.
        for (id, expected_version, expected_digest) in &self.receiving_objects {
            // If the receiving object is in the loaded runtime objects, then that means that it
            // was actually successfully loaded (so existed, and there was authenticated mutable
            // access to it). So we insert the previous transaction as a dependency.
            if let Some(obj_meta) = self.loaded_runtime_objects.get(id) {
                // Check that the expected version, digest, and owner match the loaded version,
                // digest, and owner. If they don't then don't register a dependency.
                // This is because this could be "spoofed" by loading a dynamic object field.
                let loaded_via_receive = obj_meta.version == *expected_version
                    && obj_meta.digest == *expected_digest
                    && obj_meta.owner.is_address_owned();
                if loaded_via_receive {
                    transaction_dependencies.insert(obj_meta.previous_transaction);
                }
            }
        }

        assert!(self.protocol_config.enable_effects_v2());

        // In the case of special transactions that don't require a gas object,
        // we don't really care about the effects to gas, just use the input for it.
        // Gas coins are guaranteed to be at least size 1 and if more than 1
        // the first coin is where all the others are merged.
        let gas_coin = gas_charger
            .gas_payment_amount()
            .and_then(|gp| match gp.location {
                PaymentLocation::Coin(coin_id) => Some(coin_id),
                PaymentLocation::AddressBalance(_) => None,
            });

        let object_changes = self.get_object_changes();

        let lamport_version = self.lamport_timestamp;
        // TODO: Cleanup this clone. Potentially add unchanged_shared_objects directly to InnerTempStore.
        let loaded_per_epoch_config_objects = self.loaded_per_epoch_config_objects.read().clone();
        let inner = self.into_inner(accumulator_running_max_withdraws);

        let effects = TransactionEffects::new_from_execution_v2(
            status,
            epoch,
            gas_cost_summary,
            // TODO: Provide the list of read-only shared objects directly.
            shared_object_refs,
            loaded_per_epoch_config_objects,
            *transaction_digest,
            lamport_version,
            object_changes,
            gas_coin,
            // The events digest is only recorded when there are events at all.
            if inner.events.data.is_empty() {
                None
            } else {
                Some(inner.events.digest())
            },
            transaction_dependencies.into_iter().collect(),
        );

        (inner, effects)
    }
382
383    /// An internal check of the invariants (will only fire in debug)
384    #[cfg(debug_assertions)]
385    fn check_invariants(&self) {
386        // Check not both deleted and written
387        debug_assert!(
388            {
389                self.execution_results
390                    .written_objects
391                    .keys()
392                    .all(|id| !self.execution_results.deleted_object_ids.contains(id))
393            },
394            "Object both written and deleted."
395        );
396
397        // Check all mutable inputs are modified
398        debug_assert!(
399            {
400                self.mutable_input_refs
401                    .keys()
402                    .all(|id| self.execution_results.modified_objects.contains(id))
403            },
404            "Mutable input not modified."
405        );
406
407        debug_assert!(
408            {
409                self.execution_results
410                    .written_objects
411                    .values()
412                    .all(|obj| obj.previous_transaction == self.tx_digest)
413            },
414            "Object previous transaction not properly set",
415        );
416    }
417
418    /// Mutate a mutable input object. This is used to mutate input objects outside of PT execution.
419    pub fn mutate_input_object(&mut self, object: Object) {
420        let id = object.id();
421        debug_assert!(self.input_objects.contains_key(&id));
422        debug_assert!(!object.is_immutable());
423        self.execution_results.modified_objects.insert(id);
424        self.execution_results.written_objects.insert(id, object);
425    }
426
427    pub fn mutate_new_or_input_object(&mut self, object: Object) {
428        let id = object.id();
429        debug_assert!(!object.is_immutable());
430        if self.input_objects.contains_key(&id) {
431            self.execution_results.modified_objects.insert(id);
432        }
433        self.execution_results.written_objects.insert(id, object);
434    }
435
436    /// Mutate a child object outside of PT. This should be used extremely rarely.
437    /// Currently it's only used by advance_epoch_safe_mode because it's all native
438    /// without PT. This should almost never be used otherwise.
439    pub fn mutate_child_object(&mut self, old_object: Object, new_object: Object) {
440        let id = new_object.id();
441        let old_ref = old_object.compute_object_reference();
442        debug_assert_eq!(old_ref.0, id);
443        self.loaded_runtime_objects.insert(
444            id,
445            DynamicallyLoadedObjectMetadata {
446                version: old_ref.1,
447                digest: old_ref.2,
448                owner: old_object.owner.clone(),
449                storage_rebate: old_object.storage_rebate,
450                previous_transaction: old_object.previous_transaction,
451            },
452        );
453        self.execution_results.modified_objects.insert(id);
454        self.execution_results
455            .written_objects
456            .insert(id, new_object);
457    }
458
459    /// Upgrade system package during epoch change. This requires special treatment
460    /// since the system package to be upgraded is not in the input objects.
461    /// We could probably fix above to make it less special.
462    pub fn upgrade_system_package(&mut self, package: Object) {
463        let id = package.id();
464        assert!(package.is_package() && is_system_package(id));
465        self.execution_results.modified_objects.insert(id);
466        self.execution_results.written_objects.insert(id, package);
467    }
468
    /// Create a new object. This is used to create objects outside of PT execution.
    pub fn create_object(&mut self, object: Object) {
        // Created mutable objects' versions are set to the store's lamport timestamp when it is
        // committed to effects. Creating an object at a non-zero version risks violating the
        // lamport timestamp invariant (that a transaction's lamport timestamp is strictly greater
        // than all versions witnessed by the transaction).
        debug_assert!(
            object.is_immutable() || object.version() == SequenceNumber::MIN,
            "Created mutable objects should not have a version set",
        );
        let id = object.id();
        self.execution_results.created_object_ids.insert(id);
        self.execution_results.written_objects.insert(id, object);
    }
483
484    /// Delete a mutable input object. This is used to delete input objects outside of PT execution.
485    pub fn delete_input_object(&mut self, id: &ObjectID) {
486        // there should be no deletion after write
487        debug_assert!(!self.execution_results.written_objects.contains_key(id));
488        debug_assert!(self.input_objects.contains_key(id));
489        self.execution_results.modified_objects.insert(*id);
490        self.execution_results.deleted_object_ids.insert(*id);
491    }
492
    /// Drops all pending execution results (writes, deletes, events).
    pub fn drop_writes(&mut self) {
        self.execution_results.drop_writes();
        // The PTB-emitted ranges pointed into the now-cleared accumulator_events vec.
        self.ptb_emitted_accumulator_event_ranges.clear();
    }
498
499    pub fn read_object(&self, id: &ObjectID) -> Option<&Object> {
500        // there should be no read after delete
501        debug_assert!(!self.execution_results.deleted_object_ids.contains(id));
502        self.execution_results
503            .written_objects
504            .get(id)
505            .or_else(|| self.input_objects.get(id))
506    }
507
508    pub fn save_loaded_runtime_objects(
509        &mut self,
510        loaded_runtime_objects: BTreeMap<ObjectID, DynamicallyLoadedObjectMetadata>,
511    ) {
512        #[cfg(debug_assertions)]
513        {
514            for (id, v1) in &loaded_runtime_objects {
515                if let Some(v2) = self.loaded_runtime_objects.get(id) {
516                    assert_eq!(v1, v2);
517                }
518            }
519            for (id, v1) in &self.loaded_runtime_objects {
520                if let Some(v2) = loaded_runtime_objects.get(id) {
521                    assert_eq!(v1, v2);
522                }
523            }
524        }
525        // Merge the two maps because we may be calling the execution engine more than once
526        // (e.g. in advance epoch transaction, where we may be publishing a new system package).
527        self.loaded_runtime_objects.extend(loaded_runtime_objects);
528    }
529
530    pub fn save_wrapped_object_containers(
531        &mut self,
532        wrapped_object_containers: BTreeMap<ObjectID, ObjectID>,
533    ) {
534        #[cfg(debug_assertions)]
535        {
536            for (id, container1) in &wrapped_object_containers {
537                if let Some(container2) = self.wrapped_object_containers.get(id) {
538                    assert_eq!(container1, container2);
539                }
540            }
541            for (id, container1) in &self.wrapped_object_containers {
542                if let Some(container2) = wrapped_object_containers.get(id) {
543                    assert_eq!(container1, container2);
544                }
545            }
546        }
547        // Merge the two maps because we may be calling the execution engine more than once
548        // (e.g. in advance epoch transaction, where we may be publishing a new system package).
549        self.wrapped_object_containers
550            .extend(wrapped_object_containers);
551    }
552
553    pub fn save_generated_object_ids(&mut self, generated_ids: BTreeSet<ObjectID>) {
554        #[cfg(debug_assertions)]
555        {
556            for id in &self.generated_runtime_ids {
557                assert!(!generated_ids.contains(id))
558            }
559            for id in &generated_ids {
560                assert!(!self.generated_runtime_ids.contains(id));
561            }
562        }
563        self.generated_runtime_ids.extend(generated_ids);
564    }
565
    /// Upper-bound estimate of the effects size, based on the counts of
    /// written, modified, and input objects.
    pub fn estimate_effects_size_upperbound(&self) -> usize {
        TransactionEffects::estimate_effects_size_upperbound_v2(
            self.execution_results.written_objects.len(),
            self.execution_results.modified_objects.len(),
            self.input_objects.len(),
        )
    }
573
574    pub fn written_objects_size(&self) -> usize {
575        self.execution_results
576            .written_objects
577            .values()
578            .fold(0, |sum, obj| sum + obj.object_size_for_gas_metering())
579    }
580
    /// Validates gasless post-execution invariants:
    /// - No new objects were created or existing objects mutated (written_objects is empty)
    /// - The set of deleted objects exactly equals the set of input Coin objects
    /// - Each recipient receives at least the minimum transfer amount per token type
    /// - Unused withdrawal reservation (reservation - actual split) is 0 or >= min_amount
    ///
    /// Returns `Err` with a human-readable reason on the first violated invariant.
    pub fn check_gasless_execution_requirements(
        &self,
        withdrawal_reservations: Option<&BTreeMap<(SuiAddress, TypeTag), u64>>,
    ) -> Result<(), String> {
        if !self.execution_results.written_objects.is_empty() {
            return Err("Gasless transactions cannot create or mutate objects".to_string());
        }

        // Every input object that has a coin type counts as an input Coin.
        let input_coin_ids: BTreeSet<ObjectID> = self
            .input_objects
            .iter()
            .filter(|(_, obj)| obj.coin_type_maybe().is_some())
            .map(|(id, _)| *id)
            .collect();
        if self.execution_results.deleted_object_ids != input_coin_ids {
            return Err(format!(
                "Gasless transaction must destroy exactly its input Coins. \
                 Expected: {input_coin_ids:?}, deleted: {:?}",
                self.execution_results.deleted_object_ids
            ));
        }

        let allowed_types =
            sui_types::transaction::get_gasless_allowed_token_types(self.protocol_config);

        // Aggregate signed balance changes per (address, token_type).
        // Positive nets are recipient deposits that must meet the minimum transfer amount.
        let net_totals = sui_types::balance_change::signed_balance_changes_from_events(
            &self.execution_results.accumulator_events,
        )
        .fold(
            BTreeMap::<(SuiAddress, TypeTag), i128>::new(),
            |mut totals, (address, token_type, signed_amount)| {
                *totals.entry((address, token_type)).or_default() += signed_amount;
                totals
            },
        );

        // Reject any net deposit below the per-token minimum transfer amount.
        for ((recipient, token_type), net_amount) in &net_totals {
            if *net_amount <= 0 {
                continue;
            }
            if let Some(&min_amount) = allowed_types.get(token_type)
                && *net_amount < i128::from(min_amount)
            {
                return Err(format!(
                    "Gasless transfer of {net_amount} to {recipient} is below \
                     minimum {min_amount} for token type {token_type}"
                ));
            }
        }

        if let Some(reservations) = withdrawal_reservations {
            for ((owner, token_type), &reserved) in reservations {
                // Withdrawals appear as negative nets, so reserved + net is the
                // unused portion of the reservation.
                let net = net_totals
                    .get(&(*owner, token_type.clone()))
                    .copied()
                    .unwrap_or(0);
                let remaining = (reserved as i128).saturating_add(net);
                if remaining > 0
                    && let Some(&min_balance_remaining) = allowed_types.get(token_type)
                    && min_balance_remaining > 0
                    && remaining < min_balance_remaining as i128
                {
                    return Err(format!(
                        "Gasless withdrawal leaves {remaining} unused for {owner}, \
                         below minimum {min_balance_remaining} for token type {token_type}"
                    ));
                }
            }
        }

        Ok(())
    }
660
661    /// If there are unmetered storage rebate (due to system transaction), we put them into
662    /// the storage rebate of 0x5 object.
663    /// TODO: This will not work for potential future new system transactions if 0x5 is not in the input.
664    /// We should fix this.
665    pub fn conserve_unmetered_storage_rebate(&mut self, unmetered_storage_rebate: u64) {
666        if unmetered_storage_rebate == 0 {
667            // If unmetered_storage_rebate is 0, we are most likely executing the genesis transaction.
668            // And in that case we cannot mutate the 0x5 object because it's newly created.
669            // And there is no storage rebate that needs distribution anyway.
670            return;
671        }
672        tracing::debug!(
673            "Amount of unmetered storage rebate from system tx: {:?}",
674            unmetered_storage_rebate
675        );
676        let mut system_state_wrapper = self
677            .read_object(&SUI_SYSTEM_STATE_OBJECT_ID)
678            .expect("0x5 object must be mutated in system tx with unmetered storage rebate")
679            .clone();
680        // In unmetered execution, storage_rebate field of mutated object must be 0.
681        // If not, we would be dropping SUI on the floor by overriding it.
682        assert_eq!(system_state_wrapper.storage_rebate, 0);
683        system_state_wrapper.storage_rebate = unmetered_storage_rebate;
684        self.mutate_input_object(system_state_wrapper);
685    }
686
    /// Add an accumulator event to the execution results.
    /// Accumulator events record pending accumulator writes (e.g. balance changes)
    /// produced during execution; they are accounted for in the conservation checks
    /// below and presumably applied by a later settlement transaction — see
    /// `settlement_input_sui`/`settlement_output_sui` usage in
    /// `check_sui_conserved_expensive`.
    pub fn add_accumulator_event(&mut self, event: AccumulatorEvent) {
        self.execution_results.accumulator_events.push(event);
    }
691
692    /// Given an object ID, if it's not modified, returns None.
693    /// Otherwise returns its metadata, including version, digest, owner and storage rebate.
694    /// A modified object must be either a mutable input, or a loaded child object.
695    /// The only exception is when we upgrade system packages, in which case the upgraded
696    /// system packages are not part of input, but are modified.
697    fn get_object_modified_at(
698        &self,
699        object_id: &ObjectID,
700    ) -> Option<DynamicallyLoadedObjectMetadata> {
701        if self.execution_results.modified_objects.contains(object_id) {
702            Some(
703                self.mutable_input_refs
704                    .get(object_id)
705                    .map(
706                        |((version, digest), owner)| DynamicallyLoadedObjectMetadata {
707                            version: *version,
708                            digest: *digest,
709                            owner: owner.clone(),
710                            // It's guaranteed that a mutable input object is an input object.
711                            storage_rebate: self.input_objects[object_id].storage_rebate,
712                            previous_transaction: self.input_objects[object_id]
713                                .previous_transaction,
714                        },
715                    )
716                    .or_else(|| self.loaded_runtime_objects.get(object_id).cloned())
717                    .unwrap_or_else(|| {
718                        debug_assert!(is_system_package(*object_id));
719                        let package_obj =
720                            self.store.get_package_object(object_id).unwrap().unwrap();
721                        let obj = package_obj.object();
722                        DynamicallyLoadedObjectMetadata {
723                            version: obj.version(),
724                            digest: obj.digest(),
725                            owner: obj.owner.clone(),
726                            storage_rebate: obj.storage_rebate,
727                            previous_transaction: obj.previous_transaction,
728                        }
729                    }),
730            )
731        } else {
732            None
733        }
734    }
735
    /// Returns the protocol config this store was created with. The reference carries
    /// the `'backing` lifetime, so it outlives the `TemporaryStore` itself.
    pub fn protocol_config(&self) -> &'backing ProtocolConfig {
        self.protocol_config
    }
739}
740
impl TemporaryStore<'_> {
    // check that every object read is owned directly or indirectly by sender, sponsor,
    // or a shared object input
    //
    // This is an after-the-fact defense-in-depth pass over the recorded execution
    // results: first seed a set of "authenticated" roots (mutable inputs, IDs generated
    // by the runtime, sender, sponsor), then walk each modified object's ownership
    // chain upward until an authenticated root is reached, panicking on failure.
    pub fn check_ownership_invariants(
        &self,
        sender: &SuiAddress,
        sponsor: &Option<SuiAddress>,
        gas_charger: &mut GasCharger,
        mutable_inputs: &HashSet<ObjectID>,
        is_epoch_change: bool,
    ) -> SuiResult<()> {
        let gas_objs: HashSet<&ObjectID> = gas_charger.used_coins().map(|g| &g.0).collect();
        // Gas may be paid by a sponsor; otherwise the sender owns the gas coins.
        let gas_owner = sponsor.as_ref().unwrap_or(sender);

        // mark input objects as authenticated
        let mut authenticated_for_mutation: HashSet<_> = self
            .input_objects
            .iter()
            .filter_map(|(id, obj)| {
                match &obj.owner {
                    Owner::AddressOwner(a) => {
                        if gas_objs.contains(id) {
                            // gas object must be owned by sender or sponsor
                            assert!(
                                a == gas_owner,
                                "Gas object must be owned by sender or sponsor"
                            );
                        } else {
                            assert!(sender == a, "Input object must be owned by sender");
                        }
                        Some(id)
                    }
                    Owner::Shared { .. } | Owner::ConsensusAddressOwner { .. } => Some(id),
                    Owner::Immutable => {
                        // object is authenticated, but it cannot own other objects,
                        // so we should not add it to `authenticated_objs`
                        // However, we would definitely want to add immutable objects
                        // to the set of authenticated roots if we were doing runtime
                        // checks inside the VM instead of after-the-fact in the temporary
                        // store. Here, we choose not to add them because this will catch a
                        // bug where we mutate or delete an object that belongs to an immutable
                        // object (though it will show up somewhat opaquely as an authentication
                        // failure), whereas adding the immutable object to the roots will prevent
                        // us from catching this.
                        None
                    }
                    Owner::ObjectOwner(_parent) => {
                        unreachable!(
                            "Input objects must be address owned, shared, consensus, or immutable"
                        )
                    }
                }
            })
            .filter(|id| {
                // remove any non-mutable inputs. This will remove deleted or readonly shared
                // objects
                mutable_inputs.contains(id)
            })
            .copied()
            // Add any object IDs generated in the object runtime during execution to the
            // authenticated set (i.e., new (non-package) objects, and possibly ephemeral UIDs).
            .chain(self.generated_runtime_ids.iter().copied())
            .collect();

        // Add sender and sponsor (if present) to authenticated set.
        // Addresses are converted into ObjectIDs so they can match the owner-address
        // "parents" resolved from `Owner::AddressOwner` in the loop below.
        authenticated_for_mutation.insert((*sender).into());
        if let Some(sponsor) = sponsor {
            authenticated_for_mutation.insert((*sponsor).into());
        }

        // check all modified objects are authenticated.
        // `objects_to_authenticate` is a worklist of IDs whose ownership chain still
        // needs to be traced back to an authenticated root.
        let mut objects_to_authenticate = self
            .execution_results
            .modified_objects
            .iter()
            .copied()
            .collect::<Vec<_>>();

        while let Some(to_authenticate) = objects_to_authenticate.pop() {
            if authenticated_for_mutation.contains(&to_authenticate) {
                // object has already been authenticated
                continue;
            }

            let parent = if let Some(container_id) =
                self.wrapped_object_containers.get(&to_authenticate)
            {
                // It's a wrapped object, so check that the container is authenticated
                *container_id
            } else {
                // It's non-wrapped, so check the owner -- we can load the object from the
                // store.
                let Some(old_obj) = self.store.get_object(&to_authenticate) else {
                    panic!(
                        "Failed to load object {to_authenticate:?}.\n \
                         If it cannot be loaded, we would expect it to be in the wrapped object map: {:#?}",
                        &self.wrapped_object_containers
                    )
                };

                match &old_obj.owner {
                    // We mutated a dynamic field, we can continue to trace this back to verify
                    // proper ownership.
                    Owner::ObjectOwner(parent) => ObjectID::from(*parent),
                    // We mutated an address owned or sequenced address owned object -- one of two cases apply:
                    // 1) the object is owned by an object or address in the authenticated set,
                    // 2) the object is owned by some other address, in which case we should
                    //    continue to trace this back.
                    Owner::AddressOwner(parent)
                    | Owner::ConsensusAddressOwner { owner: parent, .. } => {
                        // For Receiving<_> objects, the address owner is actually an object.
                        // If it was actually an address, we should have caught it as an input and
                        // it would already have been in authenticated_for_mutation
                        ObjectID::from(*parent)
                    }
                    // We mutated a shared object -- we checked if this object was in the
                    // authenticated set at the top of this loop and it wasn't so this is a failure.
                    owner @ Owner::Shared { .. } => {
                        panic!(
                            "Unauthenticated root at {to_authenticate:?} with owner {owner:?}\n\
                             Potentially covering objects in: {authenticated_for_mutation:#?}"
                        );
                    }
                    Owner::Immutable => {
                        assert!(
                            is_epoch_change,
                            "Immutable objects cannot be written, except for \
                             Sui Framework/Move stdlib upgrades at epoch change boundaries"
                        );
                        // Note: this assumes that the only immutable objects an epoch change
                        // tx can update are system packages,
                        // but in principle we could allow others.
                        assert!(
                            is_system_package(to_authenticate),
                            "Only system packages can be upgraded"
                        );
                        continue;
                    }
                }
            };

            // we now assume the object is authenticated and check the parent
            authenticated_for_mutation.insert(to_authenticate);
            objects_to_authenticate.push(parent);
        }
        Ok(())
    }
}
889
890impl TemporaryStore<'_> {
891    /// Track storage gas for each mutable input object (including the gas coin)
892    /// and each created object. Compute storage refunds for each deleted object.
893    /// Will *not* charge anything, gas status keeps track of storage cost and rebate.
894    /// All objects will be updated with their new (current) storage rebate/cost.
895    /// `SuiGasStatus` `storage_rebate` and `storage_gas_units` track the transaction
896    /// overall storage rebate and cost.
897    pub(crate) fn collect_storage_and_rebate(&mut self, gas_charger: &mut GasCharger) {
898        // Use two loops because we cannot mut iterate written while calling get_object_modified_at.
899        let old_storage_rebates: Vec<_> = self
900            .execution_results
901            .written_objects
902            .keys()
903            .map(|object_id| {
904                self.get_object_modified_at(object_id)
905                    .map(|metadata| metadata.storage_rebate)
906                    .unwrap_or_default()
907            })
908            .collect();
909        for (object, old_storage_rebate) in self
910            .execution_results
911            .written_objects
912            .values_mut()
913            .zip_debug_eq(old_storage_rebates)
914        {
915            // new object size
916            let new_object_size = object.object_size_for_gas_metering();
917            // track changes and compute the new object `storage_rebate`
918            let new_storage_rebate = gas_charger.track_storage_mutation(
919                object.id(),
920                new_object_size,
921                old_storage_rebate,
922            );
923            object.storage_rebate = new_storage_rebate;
924        }
925
926        self.collect_rebate(gas_charger);
927    }
928
929    pub(crate) fn collect_rebate(&self, gas_charger: &mut GasCharger) {
930        for object_id in &self.execution_results.modified_objects {
931            if self
932                .execution_results
933                .written_objects
934                .contains_key(object_id)
935            {
936                continue;
937            }
938            // get and track the deleted object `storage_rebate`
939            let storage_rebate = self
940                .get_object_modified_at(object_id)
941                // Unwrap is safe because this loop iterates through all modified objects.
942                .unwrap()
943                .storage_rebate;
944            gas_charger.track_storage_mutation(*object_id, 0, storage_rebate);
945        }
946    }
947
948    pub fn check_execution_results_consistency(&self) -> Result<(), ExecutionError> {
949        assert_invariant!(
950            self.execution_results
951                .created_object_ids
952                .iter()
953                .all(|id| !self.execution_results.deleted_object_ids.contains(id)
954                    && !self.execution_results.modified_objects.contains(id)),
955            "Created object IDs cannot also be deleted or modified"
956        );
957        assert_invariant!(
958            self.execution_results.modified_objects.iter().all(|id| {
959                self.mutable_input_refs.contains_key(id)
960                    || self.loaded_runtime_objects.contains_key(id)
961                    || is_system_package(*id)
962            }),
963            "A modified object must be either a mutable input, a loaded child object, or a system package"
964        );
965        Ok(())
966    }
967}
968//==============================================================================
969// Charge gas current - end
970//==============================================================================
971
972impl TemporaryStore<'_> {
973    pub fn advance_epoch_safe_mode(
974        &mut self,
975        params: &AdvanceEpochParams,
976        protocol_config: &ProtocolConfig,
977    ) {
978        let wrapper = get_sui_system_state_wrapper(self.store.as_object_store())
979            .expect("System state wrapper object must exist");
980        let (old_object, new_object) =
981            wrapper.advance_epoch_safe_mode(params, self.store.as_object_store(), protocol_config);
982        self.mutate_child_object(old_object, new_object);
983    }
984}
985
/// Per-object record produced by `get_modified_objects`:
/// `(object ID, input-side metadata, output object)`.
type ModifiedObjectInfo<'a> = (
    ObjectID,
    // old object metadata, including version, digest, owner, and storage rebate.
    // `None` when the object did not exist before this transaction (it was created).
    Option<DynamicallyLoadedObjectMetadata>,
    // The new version of the object written by this transaction, if any;
    // `None` when the object was modified but not written back (e.g. deleted).
    Option<&'a Object>,
);
992
993impl TemporaryStore<'_> {
994    fn get_input_sui(
995        &self,
996        id: &ObjectID,
997        expected_version: SequenceNumber,
998        layout_resolver: &mut impl LayoutResolver,
999    ) -> Result<u64, ExecutionError> {
1000        if let Some(obj) = self.input_objects.get(id) {
1001            // the assumption here is that if it is in the input objects must be the right one
1002            if obj.version() != expected_version {
1003                invariant_violation!(
1004                    "Version mismatching when resolving input object to check conservation--\
1005                     expected {}, got {}",
1006                    expected_version,
1007                    obj.version(),
1008                );
1009            }
1010            obj.get_total_sui(layout_resolver).map_err(|e| {
1011                make_invariant_violation!(
1012                    "Failed looking up input SUI in SUI conservation checking for input with \
1013                         type {:?}: {e:#?}",
1014                    obj.struct_tag(),
1015                )
1016            })
1017        } else {
1018            // not in input objects, must be a dynamic field
1019            let Some(obj) = self.store.get_object_by_key(id, expected_version) else {
1020                invariant_violation!(
1021                    "Failed looking up dynamic field {id} in SUI conservation checking"
1022                );
1023            };
1024            obj.get_total_sui(layout_resolver).map_err(|e| {
1025                make_invariant_violation!(
1026                    "Failed looking up input SUI in SUI conservation checking for type \
1027                         {:?}: {e:#?}",
1028                    obj.struct_tag(),
1029                )
1030            })
1031        }
1032    }
1033
1034    /// Return the list of all modified objects, for each object, returns
1035    /// - Object ID,
1036    /// - Input: If the object existed prior to this transaction, include their version and storage_rebate,
1037    /// - Output: If a new version of the object is written, include the new object.
1038    fn get_modified_objects(&self) -> Vec<ModifiedObjectInfo<'_>> {
1039        self.execution_results
1040            .modified_objects
1041            .iter()
1042            .map(|id| {
1043                let metadata = self.get_object_modified_at(id);
1044                let output = self.execution_results.written_objects.get(id);
1045                (*id, metadata, output)
1046            })
1047            .chain(
1048                self.execution_results
1049                    .written_objects
1050                    .iter()
1051                    .filter_map(|(id, object)| {
1052                        if self.execution_results.modified_objects.contains(id) {
1053                            None
1054                        } else {
1055                            Some((*id, None, Some(object)))
1056                        }
1057                    }),
1058            )
1059            .collect()
1060    }
1061
1062    /// Check that this transaction neither creates nor destroys SUI. This should hold for all txes
1063    /// except the epoch change tx, which mints staking rewards equal to the gas fees burned in the
1064    /// previous epoch.  Specifically, this checks two key invariants about storage
1065    /// fees and storage rebate:
1066    ///
1067    /// 1. all SUI in storage rebate fields of input objects should flow either to the transaction
1068    ///    storage rebate, or the transaction non-refundable storage rebate
1069    /// 2. all SUI charged for storage should flow into the storage rebate field of some output
1070    ///    object
1071    ///
1072    /// This function is intended to be called *after* we have charged for
1073    /// gas + applied the storage rebate to the gas object, but *before* we
1074    /// have updated object versions.
1075    pub fn check_sui_conserved(
1076        &self,
1077        simple_conservation_checks: bool,
1078        gas_summary: &GasCostSummary,
1079    ) -> Result<(), ExecutionError> {
1080        if !simple_conservation_checks {
1081            return Ok(());
1082        }
1083        // total amount of SUI in storage rebate of input objects
1084        let mut total_input_rebate = 0;
1085        // total amount of SUI in storage rebate of output objects
1086        let mut total_output_rebate = 0;
1087        for (_id, input, output) in self.get_modified_objects() {
1088            if let Some(input) = input {
1089                total_input_rebate += input.storage_rebate;
1090            }
1091            if let Some(object) = output {
1092                total_output_rebate += object.storage_rebate;
1093            }
1094        }
1095
1096        if gas_summary.storage_cost == 0 {
1097            // this condition is usually true when the transaction went OOG and no
1098            // gas is left for storage charges.
1099            // The storage cost has to be there at least for the gas coin which
1100            // will not be deleted even when going to 0.
1101            // However if the storage cost is 0 and if there is any object touched
1102            // or deleted the value in input must be equal to the output plus rebate and
1103            // non refundable.
1104            // Rebate and non refundable will be positive when there are object deleted
1105            // (gas smashing being the primary and possibly only example).
1106            // A more typical condition is for all storage charges in summary to be 0 and
1107            // then input and output must be the same value
1108            if total_input_rebate
1109                != total_output_rebate
1110                    + gas_summary.storage_rebate
1111                    + gas_summary.non_refundable_storage_fee
1112            {
1113                return Err(ExecutionError::invariant_violation(format!(
1114                    "SUI conservation failed -- no storage charges in gas summary \
1115                        and total storage input rebate {} not equal  \
1116                        to total storage output rebate {}",
1117                    total_input_rebate, total_output_rebate,
1118                )));
1119            }
1120        } else {
1121            // all SUI in storage rebate fields of input objects should flow either to
1122            // the transaction storage rebate, or the non-refundable storage rebate pool
1123            if total_input_rebate
1124                != gas_summary.storage_rebate + gas_summary.non_refundable_storage_fee
1125            {
1126                return Err(ExecutionError::invariant_violation(format!(
1127                    "SUI conservation failed -- {} SUI in storage rebate field of input objects, \
1128                        {} SUI in tx storage rebate or tx non-refundable storage rebate",
1129                    total_input_rebate, gas_summary.non_refundable_storage_fee,
1130                )));
1131            }
1132
1133            // all SUI charged for storage should flow into the storage rebate field
1134            // of some output object
1135            if gas_summary.storage_cost != total_output_rebate {
1136                return Err(ExecutionError::invariant_violation(format!(
1137                    "SUI conservation failed -- {} SUI charged for storage, \
1138                        {} SUI in storage rebate field of output objects",
1139                    gas_summary.storage_cost, total_output_rebate
1140                )));
1141            }
1142        }
1143        Ok(())
1144    }
1145
1146    /// Defense-in-depth invariant on funds-accumulator events. Per `(address, type)`:
1147    /// - If the pair is in `input_reservations`: net withdrawal ≤ budget.
1148    /// - Else if PTB-emitted events touched it: runtime contribution must not push the net
1149    ///   below Move's deposit (`actual ≥ min(0, ptb_change)`).
1150    /// - Else: any event is unauthorized — fatal.
1151    ///
1152    /// Currently the only funds-accumulator type is `Balance<T>`, so the check is scoped to
1153    /// those events. As more accumulator shapes are added the filter and the integer
1154    /// arithmetic in `check_address_balance_changes_impl` will need to grow with them.
1155    ///
1156    /// PTB-emitted events are identified via `ptb_emitted_accumulator_event_ranges`, populated
1157    /// at `record_execution_results` time. They are trusted because Move enforces `&mut UID`
1158    /// and the native checks the actual balance.
1159    ///
1160    /// `protocol_config.enforce_address_balance_change_invariant()` selects the failure mode:
1161    /// - On (post-flag): violations are returned as `Err` so the caller's
1162    ///   conservation-recovery flow can abort the tx cleanly.
1163    /// - Off (pre-flag): the check still runs, but a violation panics so unexpected
1164    ///   violations surface loudly during rollout.
1165    pub fn check_address_balance_changes(
1166        &self,
1167        protocol_config: &ProtocolConfig,
1168        input_reservations: &BTreeMap<(SuiAddress, TypeTag), u64>,
1169    ) -> Result<(), ExecutionError> {
1170        let result = self.check_address_balance_changes_impl(input_reservations);
1171        if protocol_config.enforce_address_balance_change_invariant() {
1172            result
1173        } else {
1174            if let Err(e) = result {
1175                panic!("address-balance-change invariant violated pre-flag: {e}");
1176            }
1177            Ok(())
1178        }
1179    }
1180
    /// Implementation of the per-`(address, type)` balance-change invariant; see
    /// `check_address_balance_changes` above for the full contract and failure modes.
    /// First pass accumulates signed net changes per key (all events, and the
    /// PTB-emitted subset separately); second pass checks each key against its
    /// reservation budget or PTB-emitted floor.
    fn check_address_balance_changes_impl(
        &self,
        input_reservations: &BTreeMap<(SuiAddress, TypeTag), u64>,
    ) -> Result<(), ExecutionError> {
        use sui_types::balance::Balance;

        // Net signed change per (address, type) across *all* accumulator events.
        let mut actual_changes: BTreeMap<(SuiAddress, TypeTag), i128> = BTreeMap::new();
        // Same, restricted to the PTB-emitted (Move-authorized) subset of events.
        let mut ptb_changes: BTreeMap<(SuiAddress, TypeTag), i128> = BTreeMap::new();
        for (idx, event) in self.execution_results.accumulator_events.iter().enumerate() {
            // Filter on the value shape first: only `Integer` carries the funds-flow we care
            // about. Other shapes (e.g. `EventDigest` for event-stream heads) belong to
            // non-Balance accumulators and are out of scope here. If we ever see an `Integer`
            // value at a non-`Balance<T>` type, the accounting invariants below don't apply
            // — debug_fatal so that case is surfaced instead of silently accepted.
            let amount = match event.write.value {
                AccumulatorValue::Integer(amount) => amount as i128,
                AccumulatorValue::IntegerTuple(_, _) | AccumulatorValue::EventDigest(_) => {
                    continue;
                }
            };
            if !Balance::is_balance_type(&event.write.address.ty) {
                debug_fatal!(
                    "Integer accumulator value at non-Balance type: {:?}",
                    event.write.address.ty
                );
                continue;
            }
            let is_ptb_emitted = self
                .ptb_emitted_accumulator_event_ranges
                .iter()
                .any(|range| range.contains(&idx));
            let key = (event.write.address.address, event.write.address.ty.clone());
            // Split = funds leaving the accumulator entry (negative), Merge = entering.
            let change = match event.write.operation {
                AccumulatorOperation::Split => -amount,
                AccumulatorOperation::Merge => amount,
            };
            *actual_changes.entry(key.clone()).or_insert(0) += change;
            if is_ptb_emitted {
                *ptb_changes.entry(key).or_insert(0) += change;
            }
        }

        for (key, actual) in actual_changes {
            let (address, type_tag) = &key;
            if let Some(budget) = input_reservations.get(&key).copied() {
                // Magnitude of the net withdrawal (0 if the net change is a deposit).
                // Unary minus binds tighter than `as`, so this is `(-min) as u128`.
                let net_withdrawn = -actual.min(0) as u128;
                assert_invariant!(
                    net_withdrawn <= budget as u128,
                    "Balance accumulator withdrawal exceeds reservation budget at address \
                    {address} for type {type_tag}: net Split {net_withdrawn}, budget {budget}"
                );
            } else if let Some(ptb_change) = ptb_changes.get(&key).copied() {
                // Runtime-emitted withdrawals at this (address, type) are bounded by Move's
                // net deposit at the same key: actual ≥ min(0, ptb_change). When Move
                // deposited (ptb_change > 0), the runtime may withdraw down to 0; when Move
                // withdrew (ptb_change < 0), the runtime may not withdraw further.
                assert_invariant!(
                    actual >= ptb_change.min(0),
                    "PTB-emitted Balance accumulator events do not cover runtime withdrawals \
                    at address {address} for type {type_tag}: PTB change {ptb_change}, net \
                    change {actual}"
                );
            } else {
                invariant_violation!(
                    "Unauthorized runtime Balance accumulator event at address {address} for \
                    type {type_tag}: net change {actual} (no input reservation, no PTB-emitted \
                    events)"
                );
            }
        }

        Ok(())
    }
1254
    /// Check that this transaction neither creates nor destroys SUI.
    /// This more expensive check will check a third invariant on top of the 2 performed
    /// by `check_sui_conserved` above:
    ///
    /// * all SUI in input objects (including coins etc in the Move part of an object) should flow
    ///   either to an output object, or be burned as part of computation fees or non-refundable
    ///   storage rebate
    ///
    /// This function is intended to be called *after* we have charged for gas + applied the
    /// storage rebate to the gas object, but *before* we have updated object versions. The
    /// advance epoch transaction would mint `epoch_fees` amount of SUI, and burn `epoch_rebates`
    /// amount of SUI. We need these information for this check.
    pub fn check_sui_conserved_expensive(
        &self,
        gas_summary: &GasCostSummary,
        advance_epoch_gas_summary: Option<(u64, u64)>,
        layout_resolver: &mut impl LayoutResolver,
    ) -> Result<(), ExecutionError> {
        // total amount of SUI in input objects, including both coins and storage rebates
        let mut total_input_sui = 0;
        // total amount of SUI in output objects, including both coins and storage rebates
        let mut total_output_sui = 0;

        // settlement input/output sui is used by the settlement transactions to account for
        // Sui that has been gathered from the accumulator writes of transactions which it is
        // settling.
        total_input_sui += self.execution_results.settlement_input_sui;
        total_output_sui += self.execution_results.settlement_output_sui;

        for (id, input, output) in self.get_modified_objects() {
            if let Some(input) = input {
                total_input_sui += self.get_input_sui(&id, input.version, layout_resolver)?;
            }
            if let Some(object) = output {
                total_output_sui += object.get_total_sui(layout_resolver).map_err(|e| {
                    make_invariant_violation!(
                        "Failed looking up output SUI in SUI conservation checking for \
                         mutated type {:?}: {e:#?}",
                        object.struct_tag(),
                    )
                })?;
            }
        }

        // Accumulator events carry SUI into and out of accumulators, so both sides of
        // each event are folded into the balance.
        for event in &self.execution_results.accumulator_events {
            let (input, output) = event.total_sui_in_event();
            total_input_sui += input;
            total_output_sui += output;
        }

        // note: storage_cost flows into the storage_rebate field of the output objects, which is
        // why it is not accounted for here.
        // Similarly, all of the storage_rebate *except* the non-refundable portion is credited
        // back to the gas coin (an output object). Only the computation cost and the
        // non-refundable storage fee actually leave the object set, so those two are the
        // amounts added to the output side here.
        total_output_sui += gas_summary.computation_cost + gas_summary.non_refundable_storage_fee;
        if let Some((epoch_fees, epoch_rebates)) = advance_epoch_gas_summary {
            total_input_sui += epoch_fees;
            total_output_sui += epoch_rebates;
        }
        if total_input_sui != total_output_sui {
            return Err(ExecutionError::invariant_violation(format!(
                "SUI conservation failed: input={}, output={}, \
                    this transaction either mints or burns SUI",
                total_input_sui, total_output_sui,
            )));
        }
        Ok(())
    }
1323}
1324
1325impl ChildObjectResolver for TemporaryStore<'_> {
1326    fn read_child_object(
1327        &self,
1328        parent: &ObjectID,
1329        child: &ObjectID,
1330        child_version_upper_bound: SequenceNumber,
1331    ) -> SuiResult<Option<Object>> {
1332        let obj_opt = self.execution_results.written_objects.get(child);
1333        if obj_opt.is_some() {
1334            Ok(obj_opt.cloned())
1335        } else {
1336            let _scope = monitored_scope("Execution::read_child_object");
1337            self.store
1338                .read_child_object(parent, child, child_version_upper_bound)
1339        }
1340    }
1341
1342    fn get_object_received_at_version(
1343        &self,
1344        owner: &ObjectID,
1345        receiving_object_id: &ObjectID,
1346        receive_object_at_version: SequenceNumber,
1347        epoch_id: EpochId,
1348    ) -> SuiResult<Option<Object>> {
1349        // You should never be able to try and receive an object after deleting it or writing it in the same
1350        // transaction since `Receiving` doesn't have copy.
1351        debug_assert!(
1352            !self
1353                .execution_results
1354                .written_objects
1355                .contains_key(receiving_object_id)
1356        );
1357        debug_assert!(
1358            !self
1359                .execution_results
1360                .deleted_object_ids
1361                .contains(receiving_object_id)
1362        );
1363        self.store.get_object_received_at_version(
1364            owner,
1365            receiving_object_id,
1366            receive_object_at_version,
1367            epoch_id,
1368        )
1369    }
1370}
1371
1372/// Compares the owner and payload of an object.
1373/// This is used to detect illegal writes to non-exclusive write objects.
1374fn was_object_mutated(object: &Object, original: &Object) -> bool {
1375    let data_equal = match (&object.data, &original.data) {
1376        (Data::Move(a), Data::Move(b)) => a.contents_and_type_equal(b),
1377        // We don't have a use for package content-equality, so we remain as strict as
1378        // possible for now.
1379        (Data::Package(a), Data::Package(b)) => a == b,
1380        _ => false,
1381    };
1382
1383    let owner_equal = match (&object.owner, &original.owner) {
1384        // We don't compare initial shared versions, because re-shared objects do not have the
1385        // correct initial shared version at this point in time, and this field is not something
1386        // that can be modified by a single transaction anyway.
1387        (Owner::Shared { .. }, Owner::Shared { .. }) => true,
1388        (
1389            Owner::ConsensusAddressOwner { owner: a, .. },
1390            Owner::ConsensusAddressOwner { owner: b, .. },
1391        ) => a == b,
1392        (Owner::AddressOwner(a), Owner::AddressOwner(b)) => a == b,
1393        (Owner::Immutable, Owner::Immutable) => true,
1394        (Owner::ObjectOwner(a), Owner::ObjectOwner(b)) => a == b,
1395
1396        // Keep the left hand side of the match exhaustive to catch future
1397        // changes to Owner
1398        (Owner::AddressOwner(_), _)
1399        | (Owner::Immutable, _)
1400        | (Owner::ObjectOwner(_), _)
1401        | (Owner::Shared { .. }, _)
1402        | (Owner::ConsensusAddressOwner { .. }, _) => false,
1403    };
1404
1405    !data_equal || !owner_equal
1406}
1407
impl Storage for TemporaryStore<'_> {
    fn reset(&mut self) {
        // Discard all pending writes so execution can be retried from a clean slate.
        self.drop_writes();
    }

    fn read_object(&self, id: &ObjectID) -> Option<&Object> {
        // Delegate to the inherent method of the same name.
        TemporaryStore::read_object(self, id)
    }

    /// Take execution results v2, and translate it back to be compatible with effects v1.
    ///
    /// Also enforces that non-exclusive write inputs were not modified, strips them from
    /// the results, and records which accumulator events originated from this PTB run.
    fn record_execution_results(
        &mut self,
        results: ExecutionResults,
    ) -> Result<(), ExecutionError> {
        let ExecutionResults::V2(mut results) = results else {
            panic!("ExecutionResults::V2 expected in sui-execution v1 and above");
        };

        // for all non-exclusive write inputs, remove them from written objects
        let mut to_remove = Vec::new();
        for (id, original) in &self.non_exclusive_input_original_versions {
            // Object must be present in `written_objects` and identical.
            // Missing from `written_objects` (`unwrap_or(true)`) or mutated relative to
            // the original both count as an illegal write and abort execution.
            if results
                .written_objects
                .get(id)
                .map(|obj| was_object_mutated(obj, original))
                .unwrap_or(true)
            {
                return Err(ExecutionError::new_with_source(
                    ExecutionErrorKind::NonExclusiveWriteInputObjectModified { id: *id },
                    "Non-exclusive write input object has been modified or deleted",
                ));
            }
            to_remove.push(*id);
        }

        // Unmodified non-exclusive inputs are stripped so they do not show up as
        // writes/modifications in the resulting effects.
        for id in to_remove {
            results.written_objects.remove(&id);
            results.modified_objects.remove(&id);
        }

        // It's important to merge instead of override results because it's
        // possible to execute PT more than once during tx execution.
        // Track the index range of accumulator events brought in here as PTB-emitted; the
        // address-balance change invariant (run inside `run_conservation_checks`) uses this
        // set to distinguish trusted PTB-emitted events from runtime-emitted ones.
        let event_start = self.execution_results.accumulator_events.len();
        self.execution_results.merge_results(
            results, /* consistent_merge */ true, /* invariant_checks */ true,
        )?;
        let event_end = self.execution_results.accumulator_events.len();
        debug_assert!(
            event_start <= event_end,
            "merge_results should not shrink accumulator_events"
        );
        // NOTE(review): whenever the assert above holds this min/max normalization is a
        // no-op; it only matters if `merge_results` could shrink the event list in a
        // release build — confirm whether it can be dropped.
        let (event_start, event_end) = (event_start.min(event_end), event_start.max(event_end));
        let range = event_start..event_end;
        match self.ptb_emitted_accumulator_event_ranges.last_mut() {
            // Coalesce with the previous PTB range if no runtime events were added in between.
            Some(last) if last.end == range.start => last.end = range.end,
            _ => self.ptb_emitted_accumulator_event_ranges.push(range),
        }

        Ok(())
    }

    fn save_loaded_runtime_objects(
        &mut self,
        loaded_runtime_objects: BTreeMap<ObjectID, DynamicallyLoadedObjectMetadata>,
    ) {
        TemporaryStore::save_loaded_runtime_objects(self, loaded_runtime_objects)
    }

    fn save_wrapped_object_containers(
        &mut self,
        wrapped_object_containers: BTreeMap<ObjectID, ObjectID>,
    ) {
        TemporaryStore::save_wrapped_object_containers(self, wrapped_object_containers)
    }

    fn check_coin_deny_list(
        &self,
        receiving_funds_type_and_owners: BTreeMap<TypeTag, BTreeSet<SuiAddress>>,
    ) -> DenyListResult {
        let result = check_coin_deny_list_v2_during_execution(
            receiving_funds_type_and_owners,
            self.cur_epoch,
            self.store.as_object_store(),
        );
        // The denylist object is only loaded if there are regulated transfers.
        // And also if we already have it in the input there is no need to commit it again in the effects.
        if result.num_non_gas_coin_owners > 0
            && !self.input_objects.contains_key(&SUI_DENY_LIST_OBJECT_ID)
        {
            self.loaded_per_epoch_config_objects
                .write()
                .insert(SUI_DENY_LIST_OBJECT_ID);
        }
        result
    }

    fn record_generated_object_ids(&mut self, generated_ids: BTreeSet<ObjectID>) {
        TemporaryStore::save_generated_object_ids(self, generated_ids)
    }
}
1513
1514impl BackingPackageStore for TemporaryStore<'_> {
1515    fn get_package_object(&self, package_id: &ObjectID) -> SuiResult<Option<PackageObject>> {
1516        // We first check the objects in the temporary store because in non-production code path,
1517        // it is possible to read packages that are just written in the same transaction.
1518        // This can happen for example when we run the expensive conservation checks, where we may
1519        // look into the types of each written object in the output, and some of them need the
1520        // newly written packages for type checking.
1521        // In production path though, this should never happen.
1522        if let Some(obj) = self.execution_results.written_objects.get(package_id) {
1523            Ok(Some(PackageObject::new(obj.clone())))
1524        } else {
1525            self.store.get_package_object(package_id).inspect(|obj| {
1526                // Track object but leave unchanged
1527                if let Some(v) = obj
1528                    && !self
1529                        .runtime_packages_loaded_from_db
1530                        .read()
1531                        .contains_key(package_id)
1532                {
1533                    // TODO: Can this lock ever block execution?
1534                    // TODO: Another way to avoid the cost of maintaining this map is to not
1535                    // enable it in normal runs, and if a fork is detected, rerun it with a flag
1536                    // turned on and start populating this field.
1537                    self.runtime_packages_loaded_from_db
1538                        .write()
1539                        .insert(*package_id, v.clone());
1540                }
1541            })
1542        }
1543    }
1544}
1545
impl ParentSync for TemporaryStore<'_> {
    // Deprecated trait method kept only to satisfy the `ParentSync` trait; per the
    // panic message, newer protocol versions never invoke it during execution.
    fn get_latest_parent_entry_ref_deprecated(&self, _object_id: ObjectID) -> Option<ObjectRef> {
        unreachable!("Never called in newer protocol versions")
    }
}