sui_adapter_latest/temporary_store.rs

// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

use crate::gas_charger::GasCharger;
use mysten_metrics::monitored_scope;
use parking_lot::RwLock;
use std::collections::{BTreeMap, BTreeSet, HashSet};
use sui_protocol_config::ProtocolConfig;
use sui_types::accumulator_event::AccumulatorEvent;
use sui_types::base_types::VersionDigest;
use sui_types::committee::EpochId;
use sui_types::deny_list_v2::check_coin_deny_list_v2_during_execution;
use sui_types::effects::{AccumulatorWriteV1, TransactionEffects, TransactionEvents};
use sui_types::error::ExecutionErrorKind;
use sui_types::execution::{
    DynamicallyLoadedObjectMetadata, ExecutionResults, ExecutionResultsV2, SharedInput,
};
use sui_types::execution_status::ExecutionStatus;
use sui_types::inner_temporary_store::InnerTemporaryStore;
use sui_types::layout_resolver::LayoutResolver;
use sui_types::object::Data;
use sui_types::storage::{BackingStore, DenyListResult, PackageObject};
use sui_types::sui_system_state::{AdvanceEpochParams, get_sui_system_state_wrapper};
use sui_types::{
    SUI_DENY_LIST_OBJECT_ID,
    base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress, TransactionDigest},
    effects::EffectsObjectChange,
    error::{ExecutionError, SuiResult},
    gas::GasCostSummary,
    object::Object,
    object::Owner,
    storage::{BackingPackageStore, ChildObjectResolver, ParentSync, Storage},
    transaction::InputObjects,
};
use sui_types::{SUI_SYSTEM_STATE_OBJECT_ID, TypeTag, is_system_package};

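/// Scratch space for executing a single transaction: buffers reads from the backing store and
/// all writes produced by execution until they are converted into effects.
///
/// A minimal lifecycle sketch (the surrounding variables such as `store`, `input_objects`, and
/// `gas_charger` are illustrative placeholders prepared by the caller, not items defined in
/// this module):
///
/// ```ignore
/// let mut temporary_store = TemporaryStore::new(
///     store,
///     input_objects,
///     receiving_objects,
///     tx_digest,
///     protocol_config,
///     cur_epoch,
/// );
/// // ... the execution engine records reads and writes through the `Storage` trait ...
/// let (inner, effects) = temporary_store.into_effects(
///     shared_object_refs,
///     &tx_digest,
///     transaction_dependencies,
///     gas_cost_summary,
///     status,
///     &mut gas_charger,
///     cur_epoch,
/// );
/// ```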
pub struct TemporaryStore<'backing> {
    // The backing store for retrieving Move packages on-chain.
    // When executing a Move call, the dependent packages are not going to be
    // in the input objects. They will be fetched from the backing store.
    // Also used for fetching the backing parent_sync to get the last known version for wrapped
    // objects.
    store: &'backing dyn BackingStore,
    tx_digest: TransactionDigest,
    input_objects: BTreeMap<ObjectID, Object>,

    /// Store the original versions of the non-exclusive write inputs, in order to detect
    /// mutations (which are illegal, but not prevented by the type system).
    non_exclusive_input_original_versions: BTreeMap<ObjectID, Object>,

    stream_ended_consensus_objects: BTreeMap<ObjectID, SequenceNumber /* start_version */>,
    /// The version to assign to all objects written by the transaction using this store.
    lamport_timestamp: SequenceNumber,
    /// Inputs that will be mutated by the transaction. Does not include NonExclusiveWrite inputs,
    /// which can be taken as `&mut T` but cannot be directly mutated.
    mutable_input_refs: BTreeMap<ObjectID, (VersionDigest, Owner)>,
    execution_results: ExecutionResultsV2,
    /// Objects that were loaded during execution (dynamic fields + received objects).
    loaded_runtime_objects: BTreeMap<ObjectID, DynamicallyLoadedObjectMetadata>,
    /// A map from wrapped object to its container. Used during expensive invariant checks.
    wrapped_object_containers: BTreeMap<ObjectID, ObjectID>,
    protocol_config: &'backing ProtocolConfig,

    /// Every package that was loaded from the DB store during execution.
    /// These packages were not previously loaded into the temporary store.
    runtime_packages_loaded_from_db: RwLock<BTreeMap<ObjectID, PackageObject>>,

    /// The set of objects that we may receive during execution. We are not guaranteed to
    /// receive all, or any, of the objects referenced in this set.
    receiving_objects: Vec<ObjectRef>,

    /// All object IDs generated by the object runtime during the transaction. Unlike
    /// `new_ids`, which contains only the IDs created (and not deleted) by the transaction,
    /// this set also includes objects that were created and then deleted.
    generated_runtime_ids: BTreeSet<ObjectID>,

    // TODO: Now that we track the epoch here, there are a few places where we don't need to pass it around.
    /// The current epoch.
    cur_epoch: EpochId,

    /// The set of per-epoch config objects that were loaded during execution, and are not in the
    /// input objects. This allows us to commit them to the effects.
    loaded_per_epoch_config_objects: RwLock<BTreeSet<ObjectID>>,
}

impl<'backing> TemporaryStore<'backing> {
    /// Creates a new store associated with an authority store, and populates it with
    /// initial objects.
    pub fn new(
        store: &'backing dyn BackingStore,
        input_objects: InputObjects,
        receiving_objects: Vec<ObjectRef>,
        tx_digest: TransactionDigest,
        protocol_config: &'backing ProtocolConfig,
        cur_epoch: EpochId,
    ) -> Self {
        let mutable_input_refs = input_objects.exclusive_mutable_inputs();
        let non_exclusive_input_original_versions = input_objects.non_exclusive_input_objects();

        let lamport_timestamp = input_objects.lamport_timestamp(&receiving_objects);
        let stream_ended_consensus_objects = input_objects.consensus_stream_ended_objects();
        let objects = input_objects.into_object_map();
        #[cfg(debug_assertions)]
        {
            // Ensure that input objects and receiving objects do not overlap.
            assert!(
                objects
                    .keys()
                    .collect::<HashSet<_>>()
                    .intersection(
                        &receiving_objects
                            .iter()
                            .map(|oref| &oref.0)
                            .collect::<HashSet<_>>()
                    )
                    .next()
                    .is_none()
            );
        }
        Self {
            store,
            tx_digest,
            input_objects: objects,
            non_exclusive_input_original_versions,
            stream_ended_consensus_objects,
            lamport_timestamp,
            mutable_input_refs,
            execution_results: ExecutionResultsV2::default(),
            protocol_config,
            loaded_runtime_objects: BTreeMap::new(),
            wrapped_object_containers: BTreeMap::new(),
            runtime_packages_loaded_from_db: RwLock::new(BTreeMap::new()),
            receiving_objects,
            generated_runtime_ids: BTreeSet::new(),
            cur_epoch,
            loaded_per_epoch_config_objects: RwLock::new(BTreeSet::new()),
        }
    }

    // Helpers to access private fields
    pub fn objects(&self) -> &BTreeMap<ObjectID, Object> {
        &self.input_objects
    }

    pub fn update_object_version_and_prev_tx(&mut self) {
        self.execution_results.update_version_and_previous_tx(
            self.lamport_timestamp,
            self.tx_digest,
            &self.input_objects,
            self.protocol_config.reshare_at_same_initial_version(),
        );

        #[cfg(debug_assertions)]
        {
            self.check_invariants();
        }
    }

    /// Break up the structure and return its internal stores (objects, active_inputs, written, deleted).
    pub fn into_inner(self) -> InnerTemporaryStore {
        let results = self.execution_results;
        InnerTemporaryStore {
            input_objects: self.input_objects,
            stream_ended_consensus_objects: self.stream_ended_consensus_objects,
            mutable_inputs: self.mutable_input_refs,
            written: results.written_objects,
            events: TransactionEvents {
                data: results.user_events,
            },
            accumulator_events: results.accumulator_events,
            loaded_runtime_objects: self.loaded_runtime_objects,
            runtime_packages_loaded_from_db: self.runtime_packages_loaded_from_db.into_inner(),
            lamport_version: self.lamport_timestamp,
            binary_config: self.protocol_config.binary_config(None),
        }
    }

    /// For every object from active_inputs (i.e. all mutable objects) that is not mutated
    /// during the transaction execution, force-mutate it by incrementing its sequence number.
    /// This is required to achieve safety.
    pub(crate) fn ensure_active_inputs_mutated(&mut self) {
        let mut to_be_updated = vec![];
        // Note: we do not mutate input objects if they are non-exclusive write inputs.
        for id in self.mutable_input_refs.keys() {
            if !self.execution_results.modified_objects.contains(id) {
                // We cannot update here but have to push to `to_be_updated` and update later,
                // because the for loop is holding a reference to `self`, and calling
                // `self.mutate_input_object` requires a mutable reference to `self`.
                to_be_updated.push(self.input_objects[id].clone());
            }
        }
        for object in to_be_updated {
            // The object must be mutated as it was present in the input objects.
            self.mutate_input_object(object.clone());
        }
    }

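    // Build the per-object change entries for effects: one entry for every ID that was
    // created, deleted, modified, or written, plus one entry per accumulator event.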
    fn get_object_changes(&self) -> BTreeMap<ObjectID, EffectsObjectChange> {
        let results = &self.execution_results;
        let all_ids = results
            .created_object_ids
            .iter()
            .chain(&results.deleted_object_ids)
            .chain(&results.modified_objects)
            .chain(results.written_objects.keys())
            .collect::<BTreeSet<_>>();
        all_ids
            .into_iter()
            .map(|id| {
                (
                    *id,
                    EffectsObjectChange::new(
                        self.get_object_modified_at(id)
                            .map(|metadata| ((metadata.version, metadata.digest), metadata.owner)),
                        results.written_objects.get(id),
                        results.created_object_ids.contains(id),
                        results.deleted_object_ids.contains(id),
                    ),
                )
            })
            .chain(results.accumulator_events.iter().cloned().map(
                |AccumulatorEvent {
                     accumulator_obj,
                     write,
                 }| {
                    (
                        *accumulator_obj.inner(),
                        EffectsObjectChange::new_from_accumulator_write(write),
                    )
                },
            ))
            .collect()
    }

    pub fn into_effects(
        mut self,
        shared_object_refs: Vec<SharedInput>,
        transaction_digest: &TransactionDigest,
        mut transaction_dependencies: BTreeSet<TransactionDigest>,
        gas_cost_summary: GasCostSummary,
        status: ExecutionStatus,
        gas_charger: &mut GasCharger,
        epoch: EpochId,
    ) -> (InnerTemporaryStore, TransactionEffects) {
        self.update_object_version_and_prev_tx();

        // Regardless of execution status (including aborts), we insert the previous transaction
        // for any successfully received objects during the transaction.
        for (id, expected_version, expected_digest) in &self.receiving_objects {
            // If the receiving object is in the loaded runtime objects, then that means that it
            // was actually successfully loaded (so existed, and there was authenticated mutable
            // access to it). So we insert the previous transaction as a dependency.
            if let Some(obj_meta) = self.loaded_runtime_objects.get(id) {
                // Check that the expected version, digest, and owner match the loaded version,
                // digest, and owner. If they don't, then don't register a dependency.
                // This is because this could be "spoofed" by loading a dynamic object field.
                let loaded_via_receive = obj_meta.version == *expected_version
                    && obj_meta.digest == *expected_digest
                    && obj_meta.owner.is_address_owned();
                if loaded_via_receive {
                    transaction_dependencies.insert(obj_meta.previous_transaction);
                }
            }
        }

        assert!(self.protocol_config.enable_effects_v2());

        // In the case of special transactions that don't require a gas object,
        // we don't really care about the effects on gas, so we just use the input for it.
        // Gas coins are guaranteed to be at least size 1, and if there is more than one,
        // the first coin is where all the others are merged.
        let gas_coin = gas_charger.gas_coin();

        let object_changes = self.get_object_changes();

        let lamport_version = self.lamport_timestamp;
        // TODO: Clean up this clone. Potentially add unchanged_shared_objects directly to InnerTempStore.
        let loaded_per_epoch_config_objects = self.loaded_per_epoch_config_objects.read().clone();
        let inner = self.into_inner();

        let effects = TransactionEffects::new_from_execution_v2(
            status,
            epoch,
            gas_cost_summary,
            // TODO: Provide the list of read-only shared objects directly.
            shared_object_refs,
            loaded_per_epoch_config_objects,
            *transaction_digest,
            lamport_version,
            object_changes,
            gas_coin,
            if inner.events.data.is_empty() {
                None
            } else {
                Some(inner.events.digest())
            },
            transaction_dependencies.into_iter().collect(),
        );

        (inner, effects)
    }

    /// An internal check of the invariants (will only fire in debug)
    #[cfg(debug_assertions)]
    fn check_invariants(&self) {
        // Check not both deleted and written
        debug_assert!(
            {
                self.execution_results
                    .written_objects
                    .keys()
                    .all(|id| !self.execution_results.deleted_object_ids.contains(id))
            },
            "Object both written and deleted."
        );

        // Check all mutable inputs are modified
        debug_assert!(
            {
                self.mutable_input_refs
                    .keys()
                    .all(|id| self.execution_results.modified_objects.contains(id))
            },
            "Mutable input not modified."
        );

        debug_assert!(
            {
                self.execution_results
                    .written_objects
                    .values()
                    .all(|obj| obj.previous_transaction == self.tx_digest)
            },
            "Object previous transaction not properly set",
        );
    }

    /// Mutate a mutable input object. This is used to mutate input objects outside of PT execution.
    pub fn mutate_input_object(&mut self, object: Object) {
        let id = object.id();
        debug_assert!(self.input_objects.contains_key(&id));
        debug_assert!(!object.is_immutable());
        self.execution_results.modified_objects.insert(id);
        self.execution_results.written_objects.insert(id, object);
    }

    /// Mutate a child object outside of PT. This should be used extremely rarely.
    /// Currently it's only used by advance_epoch_safe_mode because it's all native
    /// without PT. This should almost never be used otherwise.
    pub fn mutate_child_object(&mut self, old_object: Object, new_object: Object) {
        let id = new_object.id();
        let old_ref = old_object.compute_object_reference();
        debug_assert_eq!(old_ref.0, id);
        self.loaded_runtime_objects.insert(
            id,
            DynamicallyLoadedObjectMetadata {
                version: old_ref.1,
                digest: old_ref.2,
                owner: old_object.owner.clone(),
                storage_rebate: old_object.storage_rebate,
                previous_transaction: old_object.previous_transaction,
            },
        );
        self.execution_results.modified_objects.insert(id);
        self.execution_results
            .written_objects
            .insert(id, new_object);
    }

    /// Upgrade a system package during epoch change. This requires special treatment
    /// since the system package to be upgraded is not in the input objects.
    /// We could probably fix the above to make it less special.
    pub fn upgrade_system_package(&mut self, package: Object) {
        let id = package.id();
        assert!(package.is_package() && is_system_package(id));
        self.execution_results.modified_objects.insert(id);
        self.execution_results.written_objects.insert(id, package);
    }

    /// Create a new object. This is used to create objects outside of PT execution.
    pub fn create_object(&mut self, object: Object) {
        // Created mutable objects' versions are set to the store's lamport timestamp when it is
        // committed to effects. Creating an object at a non-zero version risks violating the
        // lamport timestamp invariant (that a transaction's lamport timestamp is strictly greater
        // than all versions witnessed by the transaction).
        debug_assert!(
            object.is_immutable() || object.version() == SequenceNumber::MIN,
            "Created mutable objects should not have a version set",
        );
        let id = object.id();
        self.execution_results.created_object_ids.insert(id);
        self.execution_results.written_objects.insert(id, object);
    }

    /// Delete a mutable input object. This is used to delete input objects outside of PT execution.
    pub fn delete_input_object(&mut self, id: &ObjectID) {
        // there should be no deletion after write
        debug_assert!(!self.execution_results.written_objects.contains_key(id));
        debug_assert!(self.input_objects.contains_key(id));
        self.execution_results.modified_objects.insert(*id);
        self.execution_results.deleted_object_ids.insert(*id);
    }

    pub fn drop_writes(&mut self) {
        self.execution_results.drop_writes();
    }

    pub fn read_object(&self, id: &ObjectID) -> Option<&Object> {
        // there should be no read after delete
        debug_assert!(!self.execution_results.deleted_object_ids.contains(id));
        self.execution_results
            .written_objects
            .get(id)
            .or_else(|| self.input_objects.get(id))
    }

    pub fn save_loaded_runtime_objects(
        &mut self,
        loaded_runtime_objects: BTreeMap<ObjectID, DynamicallyLoadedObjectMetadata>,
    ) {
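        // In debug builds, check that any object present in both the existing and the
        // incoming maps carries identical metadata in the two.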
        #[cfg(debug_assertions)]
        {
            for (id, v1) in &loaded_runtime_objects {
                if let Some(v2) = self.loaded_runtime_objects.get(id) {
                    assert_eq!(v1, v2);
                }
            }
            for (id, v1) in &self.loaded_runtime_objects {
                if let Some(v2) = loaded_runtime_objects.get(id) {
                    assert_eq!(v1, v2);
                }
            }
        }
        // Merge the two maps because we may be calling the execution engine more than once
        // (e.g. in advance epoch transaction, where we may be publishing a new system package).
        self.loaded_runtime_objects.extend(loaded_runtime_objects);
    }

    pub fn save_wrapped_object_containers(
        &mut self,
        wrapped_object_containers: BTreeMap<ObjectID, ObjectID>,
    ) {
        #[cfg(debug_assertions)]
        {
            for (id, container1) in &wrapped_object_containers {
                if let Some(container2) = self.wrapped_object_containers.get(id) {
                    assert_eq!(container1, container2);
                }
            }
            for (id, container1) in &self.wrapped_object_containers {
                if let Some(container2) = wrapped_object_containers.get(id) {
                    assert_eq!(container1, container2);
                }
            }
        }
        // Merge the two maps because we may be calling the execution engine more than once
        // (e.g. in advance epoch transaction, where we may be publishing a new system package).
        self.wrapped_object_containers
            .extend(wrapped_object_containers);
    }

    pub fn save_generated_object_ids(&mut self, generated_ids: BTreeSet<ObjectID>) {
        #[cfg(debug_assertions)]
        {
            for id in &self.generated_runtime_ids {
                assert!(!generated_ids.contains(id))
            }
            for id in &generated_ids {
                assert!(!self.generated_runtime_ids.contains(id));
            }
        }
        self.generated_runtime_ids.extend(generated_ids);
    }

    pub fn estimate_effects_size_upperbound(&self) -> usize {
        TransactionEffects::estimate_effects_size_upperbound_v2(
            self.execution_results.written_objects.len(),
            self.execution_results.modified_objects.len(),
            self.input_objects.len(),
        )
    }

    pub fn written_objects_size(&self) -> usize {
        self.execution_results
            .written_objects
            .values()
            .fold(0, |sum, obj| sum + obj.object_size_for_gas_metering())
    }

    /// If there is unmetered storage rebate (due to a system transaction), we put it into
    /// the storage rebate of the 0x5 object.
    /// TODO: This will not work for potential future new system transactions if 0x5 is not in the input.
    /// We should fix this.
    pub fn conserve_unmetered_storage_rebate(&mut self, unmetered_storage_rebate: u64) {
        if unmetered_storage_rebate == 0 {
            // If unmetered_storage_rebate is 0, we are most likely executing the genesis transaction.
            // In that case we cannot mutate the 0x5 object because it's newly created,
            // and there is no storage rebate that needs distribution anyway.
            return;
        }
        tracing::debug!(
            "Amount of unmetered storage rebate from system tx: {:?}",
            unmetered_storage_rebate
        );
        let mut system_state_wrapper = self
            .read_object(&SUI_SYSTEM_STATE_OBJECT_ID)
            .expect("0x5 object must be mutated in system tx with unmetered storage rebate")
            .clone();
        // In unmetered execution, the storage_rebate field of a mutated object must be 0.
        // If not, we would be dropping SUI on the floor by overriding it.
        assert_eq!(system_state_wrapper.storage_rebate, 0);
        system_state_wrapper.storage_rebate = unmetered_storage_rebate;
        self.mutate_input_object(system_state_wrapper);
    }

    /// Add an accumulator event to the execution results, merging with any existing
    /// event for the same accumulator object.
    pub fn add_accumulator_event(&mut self, event: AccumulatorEvent) {
        let obj_id = *event.accumulator_obj.inner();
        for existing in self.execution_results.accumulator_events.iter_mut() {
            if *existing.accumulator_obj.inner() == obj_id {
                existing.write =
                    AccumulatorWriteV1::merge(vec![existing.write.clone(), event.write]);
                return;
            }
        }
        self.execution_results.accumulator_events.push(event);
    }

    /// Given an object ID, if it's not modified, returns None.
    /// Otherwise returns its metadata, including version, digest, owner and storage rebate.
    /// A modified object must be either a mutable input, or a loaded child object.
    /// The only exception is when we upgrade system packages, in which case the upgraded
    /// system packages are not part of input, but are modified.
    fn get_object_modified_at(
        &self,
        object_id: &ObjectID,
    ) -> Option<DynamicallyLoadedObjectMetadata> {
        if self.execution_results.modified_objects.contains(object_id) {
            Some(
                self.mutable_input_refs
                    .get(object_id)
                    .map(
                        |((version, digest), owner)| DynamicallyLoadedObjectMetadata {
                            version: *version,
                            digest: *digest,
                            owner: owner.clone(),
                            // It's guaranteed that a mutable input object is an input object.
                            storage_rebate: self.input_objects[object_id].storage_rebate,
                            previous_transaction: self.input_objects[object_id]
                                .previous_transaction,
                        },
                    )
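                    // Not a mutable input: fall back to the metadata recorded when the object
                    // was loaded at runtime (a dynamic field or a received object). If it is
                    // neither, the only remaining possibility is a system package upgraded at
                    // epoch change, whose previous metadata is fetched from the backing store.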
                    .or_else(|| self.loaded_runtime_objects.get(object_id).cloned())
                    .unwrap_or_else(|| {
                        debug_assert!(is_system_package(*object_id));
                        let package_obj =
                            self.store.get_package_object(object_id).unwrap().unwrap();
                        let obj = package_obj.object();
                        DynamicallyLoadedObjectMetadata {
                            version: obj.version(),
                            digest: obj.digest(),
                            owner: obj.owner.clone(),
                            storage_rebate: obj.storage_rebate,
                            previous_transaction: obj.previous_transaction,
                        }
                    }),
            )
        } else {
            None
        }
    }
}

impl TemporaryStore<'_> {
    // check that every object read is owned directly or indirectly by sender, sponsor,
    // or a shared object input
    pub fn check_ownership_invariants(
        &self,
        sender: &SuiAddress,
        sponsor: &Option<SuiAddress>,
        gas_charger: &mut GasCharger,
        mutable_inputs: &HashSet<ObjectID>,
        is_epoch_change: bool,
    ) -> SuiResult<()> {
        let gas_objs: HashSet<&ObjectID> = gas_charger.gas_coins().map(|g| &g.0).collect();
        let gas_owner = sponsor.as_ref().unwrap_or(sender);

        // mark input objects as authenticated
        let mut authenticated_for_mutation: HashSet<_> = self
            .input_objects
            .iter()
            .filter_map(|(id, obj)| {
                match &obj.owner {
                    Owner::AddressOwner(a) => {
                        if gas_objs.contains(id) {
                            // gas object must be owned by sender or sponsor
                            assert!(
                                a == gas_owner,
                                "Gas object must be owned by sender or sponsor"
                            );
                        } else {
                            assert!(sender == a, "Input object must be owned by sender");
                        }
                        Some(id)
                    }
                    Owner::Shared { .. } | Owner::ConsensusAddressOwner { .. } => Some(id),
                    Owner::Immutable => {
                        // object is authenticated, but it cannot own other objects,
                        // so we should not add it to `authenticated_objs`
                        // However, we would definitely want to add immutable objects
                        // to the set of authenticated roots if we were doing runtime
                        // checks inside the VM instead of after-the-fact in the temporary
                        // store. Here, we choose not to add them because this will catch a
                        // bug where we mutate or delete an object that belongs to an immutable
                        // object (though it will show up somewhat opaquely as an authentication
                        // failure), whereas adding the immutable object to the roots will prevent
                        // us from catching this.
                        None
                    }
                    Owner::ObjectOwner(_parent) => {
                        unreachable!(
                            "Input objects must be address owned, shared, consensus, or immutable"
                        )
                    }
                }
            })
            .filter(|id| {
                // remove any non-mutable inputs. This will remove deleted or readonly shared
                // objects
                mutable_inputs.contains(id)
            })
            .copied()
            // Add any object IDs generated in the object runtime during execution to the
            // authenticated set (i.e., new (non-package) objects, and possibly ephemeral UIDs).
            .chain(self.generated_runtime_ids.iter().copied())
            .collect();

        // Add sender and sponsor (if present) to authenticated set
        authenticated_for_mutation.insert((*sender).into());
        if let Some(sponsor) = sponsor {
            authenticated_for_mutation.insert((*sponsor).into());
        }

        // check all modified objects are authenticated
        let mut objects_to_authenticate = self
            .execution_results
            .modified_objects
            .iter()
            .copied()
            .collect::<Vec<_>>();

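        // Worklist algorithm: pop an object; if it is not already authenticated, find its
        // parent (the wrapping container for wrapped objects, the owner otherwise) and push
        // it for authentication. A mutation is legitimate only if this chain eventually
        // reaches a root already in `authenticated_for_mutation` (sender, sponsor, a mutable
        // input, or an ID generated during execution).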
        while let Some(to_authenticate) = objects_to_authenticate.pop() {
            if authenticated_for_mutation.contains(&to_authenticate) {
                // object has already been authenticated
                continue;
            }

            let parent = if let Some(container_id) =
                self.wrapped_object_containers.get(&to_authenticate)
            {
                // It's a wrapped object, so check that the container is authenticated
                *container_id
            } else {
                // It's non-wrapped, so check the owner -- we can load the object from the
                // store.
                let Some(old_obj) = self.store.get_object(&to_authenticate) else {
                    panic!(
                        "Failed to load object {to_authenticate:?}.\n \
                         If it cannot be loaded, we would expect it to be in the wrapped object map: {:#?}",
                        &self.wrapped_object_containers
                    )
                };

                match &old_obj.owner {
                    // We mutated a dynamic field; we can continue to trace this back to verify
                    // proper ownership.
                    Owner::ObjectOwner(parent) => ObjectID::from(*parent),
                    // We mutated an address-owned or consensus-address-owned object -- one of two cases apply:
                    // 1) the object is owned by an object or address in the authenticated set,
                    // 2) the object is owned by some other address, in which case we should
                    //    continue to trace this back.
                    Owner::AddressOwner(parent)
                    | Owner::ConsensusAddressOwner { owner: parent, .. } => {
                        // For Receiving<_> objects, the address owner is actually an object.
                        // If it was actually an address, we should have caught it as an input and
                        // it would already have been in authenticated_for_mutation
                        ObjectID::from(*parent)
                    }
                    // We mutated a shared object -- we checked if this object was in the
                    // authenticated set at the top of this loop and it wasn't, so this is a failure.
                    owner @ Owner::Shared { .. } => {
                        panic!(
                            "Unauthenticated root at {to_authenticate:?} with owner {owner:?}\n\
                             Potentially covering objects in: {authenticated_for_mutation:#?}"
                        );
                    }
                    Owner::Immutable => {
                        assert!(
                            is_epoch_change,
                            "Immutable objects cannot be written, except for \
                             Sui Framework/Move stdlib upgrades at epoch change boundaries"
                        );
                        // Note: this assumes that the only immutable objects an epoch change
                        // tx can update are system packages,
                        // but in principle we could allow others.
                        assert!(
                            is_system_package(to_authenticate),
                            "Only system packages can be upgraded"
                        );
                        continue;
                    }
                }
            };

            // we now assume the object is authenticated and check the parent
            authenticated_for_mutation.insert(to_authenticate);
            objects_to_authenticate.push(parent);
        }
        Ok(())
    }
}

impl TemporaryStore<'_> {
    /// Track storage gas for each mutable input object (including the gas coin)
    /// and each created object. Compute storage refunds for each deleted object.
    /// Will *not* charge anything, gas status keeps track of storage cost and rebate.
    /// All objects will be updated with their new (current) storage rebate/cost.
    /// `SuiGasStatus` `storage_rebate` and `storage_gas_units` track the transaction
    /// overall storage rebate and cost.
    pub(crate) fn collect_storage_and_rebate(&mut self, gas_charger: &mut GasCharger) {
        // Use two loops because we cannot mut iterate written while calling get_object_modified_at.
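        // The first loop charges storage for every written object against the rebate it
        // carried before this transaction; `collect_rebate` below then credits the rebate of
        // modified objects that have no new version (i.e. deleted or wrapped objects).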
        let old_storage_rebates: Vec<_> = self
            .execution_results
            .written_objects
            .keys()
            .map(|object_id| {
                self.get_object_modified_at(object_id)
                    .map(|metadata| metadata.storage_rebate)
                    .unwrap_or_default()
            })
            .collect();
        for (object, old_storage_rebate) in self
            .execution_results
            .written_objects
            .values_mut()
            .zip(old_storage_rebates)
        {
            // new object size
            let new_object_size = object.object_size_for_gas_metering();
            // track changes and compute the new object `storage_rebate`
            let new_storage_rebate = gas_charger.track_storage_mutation(
                object.id(),
                new_object_size,
                old_storage_rebate,
            );
            object.storage_rebate = new_storage_rebate;
        }

        self.collect_rebate(gas_charger);
    }

    pub(crate) fn collect_rebate(&self, gas_charger: &mut GasCharger) {
        for object_id in &self.execution_results.modified_objects {
            if self
                .execution_results
                .written_objects
                .contains_key(object_id)
            {
                continue;
            }
            // get and track the deleted object `storage_rebate`
            let storage_rebate = self
                .get_object_modified_at(object_id)
                // Unwrap is safe because this loop iterates through all modified objects.
                .unwrap()
                .storage_rebate;
            gas_charger.track_storage_mutation(*object_id, 0, storage_rebate);
        }
    }

    pub fn check_execution_results_consistency(&self) -> Result<(), ExecutionError> {
        assert_invariant!(
            self.execution_results
                .created_object_ids
                .iter()
                .all(|id| !self.execution_results.deleted_object_ids.contains(id)
                    && !self.execution_results.modified_objects.contains(id)),
            "Created object IDs cannot also be deleted or modified"
        );
        assert_invariant!(
            self.execution_results.modified_objects.iter().all(|id| {
                self.mutable_input_refs.contains_key(id)
                    || self.loaded_runtime_objects.contains_key(id)
                    || is_system_package(*id)
            }),
            "A modified object must be either a mutable input, a loaded child object, or a system package"
        );
        Ok(())
    }
}
//==============================================================================
// Charge gas current - end
//==============================================================================

impl TemporaryStore<'_> {
    pub fn advance_epoch_safe_mode(
        &mut self,
        params: &AdvanceEpochParams,
        protocol_config: &ProtocolConfig,
    ) {
        let wrapper = get_sui_system_state_wrapper(self.store.as_object_store())
            .expect("System state wrapper object must exist");
        let (old_object, new_object) =
            wrapper.advance_epoch_safe_mode(params, self.store.as_object_store(), protocol_config);
        self.mutate_child_object(old_object, new_object);
    }
}

type ModifiedObjectInfo<'a> = (
    ObjectID,
    // old object metadata, including version, digest, owner, and storage rebate.
    Option<DynamicallyLoadedObjectMetadata>,
    Option<&'a Object>,
);

impl TemporaryStore<'_> {
    fn get_input_sui(
        &self,
        id: &ObjectID,
        expected_version: SequenceNumber,
        layout_resolver: &mut impl LayoutResolver,
    ) -> Result<u64, ExecutionError> {
        if let Some(obj) = self.input_objects.get(id) {
            // the assumption here is that if it is in the input objects, it must be the right one
            if obj.version() != expected_version {
                invariant_violation!(
                    "Version mismatch when resolving input object to check conservation--\
                     expected {}, got {}",
                    expected_version,
                    obj.version(),
                );
            }
            obj.get_total_sui(layout_resolver).map_err(|e| {
                make_invariant_violation!(
                    "Failed looking up input SUI in SUI conservation checking for input with \
                         type {:?}: {e:#?}",
                    obj.struct_tag(),
                )
            })
        } else {
            // not in input objects, must be a dynamic field
            let Some(obj) = self.store.get_object_by_key(id, expected_version) else {
                invariant_violation!(
                    "Failed looking up dynamic field {id} in SUI conservation checking"
                );
            };
            obj.get_total_sui(layout_resolver).map_err(|e| {
                make_invariant_violation!(
                    "Failed looking up input SUI in SUI conservation checking for type \
                         {:?}: {e:#?}",
                    obj.struct_tag(),
                )
            })
        }
    }

    /// Return the list of all modified objects. For each object, returns:
    /// - Object ID,
    /// - Input: if the object existed prior to this transaction, its version and storage_rebate,
    /// - Output: if a new version of the object is written, the new object.
    fn get_modified_objects(&self) -> Vec<ModifiedObjectInfo<'_>> {
        self.execution_results
            .modified_objects
            .iter()
            .map(|id| {
                let metadata = self.get_object_modified_at(id);
                let output = self.execution_results.written_objects.get(id);
                (*id, metadata, output)
            })
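            // Objects that were written but are not recorded as modified have no prior
            // (input-side) metadata; include them with `None` as the input entry.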
            .chain(
                self.execution_results
                    .written_objects
                    .iter()
                    .filter_map(|(id, object)| {
                        if self.execution_results.modified_objects.contains(id) {
                            None
                        } else {
                            Some((*id, None, Some(object)))
                        }
                    }),
            )
            .collect()
    }

    /// Check that this transaction neither creates nor destroys SUI. This should hold for all txes
    /// except the epoch change tx, which mints staking rewards equal to the gas fees burned in the
    /// previous epoch. Specifically, this checks two key invariants about storage
    /// fees and storage rebate:
    ///
    /// 1. all SUI in storage rebate fields of input objects should flow either to the transaction
    ///    storage rebate, or the transaction non-refundable storage rebate
    /// 2. all SUI charged for storage should flow into the storage rebate field of some output
    ///    object
    ///
    /// This function is intended to be called *after* we have charged for
    /// gas + applied the storage rebate to the gas object, but *before* we
    /// have updated object versions.
    pub fn check_sui_conserved(
        &self,
        simple_conservation_checks: bool,
        gas_summary: &GasCostSummary,
    ) -> Result<(), ExecutionError> {
        if !simple_conservation_checks {
            return Ok(());
        }
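        // Concretely, over the storage rebate fields only, the check below enforces:
        //   if storage_cost == 0:
        //     total_input_rebate == total_output_rebate
        //                           + gas_summary.storage_rebate
        //                           + gas_summary.non_refundable_storage_fee
        //   otherwise:
        //     total_input_rebate == gas_summary.storage_rebate
        //                           + gas_summary.non_refundable_storage_fee
        //     and gas_summary.storage_cost == total_output_rebate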
        // total amount of SUI in storage rebate of input objects
        let mut total_input_rebate = 0;
        // total amount of SUI in storage rebate of output objects
        let mut total_output_rebate = 0;
        for (_, input, output) in self.get_modified_objects() {
            if let Some(input) = input {
                total_input_rebate += input.storage_rebate;
            }
            if let Some(object) = output {
                total_output_rebate += object.storage_rebate;
            }
        }

        if gas_summary.storage_cost == 0 {
            // This condition is usually true when the transaction went OOG and no
            // gas is left for storage charges.
            // The storage cost has to be there at least for the gas coin, which
            // will not be deleted even when going to 0.
            // However, if the storage cost is 0 and any object was touched
            // or deleted, the value in the input must be equal to the output plus the rebate and
            // the non-refundable fee.
            // Rebate and non-refundable will be positive when there are objects deleted
            // (gas smashing being the primary and possibly only example).
            // A more typical condition is for all storage charges in the summary to be 0, and
            // then input and output must be the same value.
            if total_input_rebate
                != total_output_rebate
                    + gas_summary.storage_rebate
                    + gas_summary.non_refundable_storage_fee
            {
                return Err(ExecutionError::invariant_violation(format!(
                    "SUI conservation failed -- no storage charges in gas summary \
                        and total storage input rebate {} not equal \
                        to total storage output rebate {}",
                    total_input_rebate, total_output_rebate,
                )));
            }
        } else {
            // all SUI in storage rebate fields of input objects should flow either to
            // the transaction storage rebate, or the non-refundable storage rebate pool
            if total_input_rebate
                != gas_summary.storage_rebate + gas_summary.non_refundable_storage_fee
            {
                return Err(ExecutionError::invariant_violation(format!(
                    "SUI conservation failed -- {} SUI in storage rebate field of input objects, \
                        {} SUI in tx storage rebate or tx non-refundable storage rebate",
                    total_input_rebate, gas_summary.non_refundable_storage_fee,
                )));
            }

            // all SUI charged for storage should flow into the storage rebate field
            // of some output object
            if gas_summary.storage_cost != total_output_rebate {
                return Err(ExecutionError::invariant_violation(format!(
                    "SUI conservation failed -- {} SUI charged for storage, \
                        {} SUI in storage rebate field of output objects",
                    gas_summary.storage_cost, total_output_rebate
                )));
            }
        }
        Ok(())
    }

    /// Check that this transaction neither creates nor destroys SUI.
    /// This more expensive check will check a third invariant on top of the 2 performed
    /// by `check_sui_conserved` above:
    ///
    /// * all SUI in input objects (including coins etc. in the Move part of an object) should flow
    ///   either to an output object, or be burned as part of computation fees or non-refundable
    ///   storage rebate
    ///
    /// This function is intended to be called *after* we have charged for gas + applied the
    /// storage rebate to the gas object, but *before* we have updated object versions. The
    /// advance epoch transaction would mint `epoch_fees` amount of SUI, and burn `epoch_rebates`
    /// amount of SUI. We need this information for this check.
    pub fn check_sui_conserved_expensive(
        &self,
        gas_summary: &GasCostSummary,
        advance_epoch_gas_summary: Option<(u64, u64)>,
        layout_resolver: &mut impl LayoutResolver,
    ) -> Result<(), ExecutionError> {
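        // The full SUI ledger balanced here is:
        //   inputs  = SUI inside all modified input objects + settlement inputs
        //             + accumulator event inputs + minted epoch fees (if any)
        //   outputs = SUI inside all written objects + settlement outputs
        //             + accumulator event outputs + computation cost
        //             + non-refundable storage fee + burned epoch rebates (if any)
        // and the two totals must match exactly.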
        // total amount of SUI in input objects, including both coins and storage rebates
        let mut total_input_sui = 0;
        // total amount of SUI in output objects, including both coins and storage rebates
        let mut total_output_sui = 0;

        // Settlement input/output SUI is used by the settlement transactions to account for
        // SUI that has been gathered from the accumulator writes of the transactions they are
        // settling.
        total_input_sui += self.execution_results.settlement_input_sui;
        total_output_sui += self.execution_results.settlement_output_sui;

        for (id, input, output) in self.get_modified_objects() {
            if let Some(input) = input {
                total_input_sui += self.get_input_sui(&id, input.version, layout_resolver)?;
            }
            if let Some(object) = output {
                total_output_sui += object.get_total_sui(layout_resolver).map_err(|e| {
                    make_invariant_violation!(
                        "Failed looking up output SUI in SUI conservation checking for \
                         mutated type {:?}: {e:#?}",
                        object.struct_tag(),
                    )
                })?;
            }
        }

        for event in &self.execution_results.accumulator_events {
            let (input, output) = event.total_sui_in_event();
            total_input_sui += input;
            total_output_sui += output;
        }

        // Note: storage_cost flows into the storage_rebate field of the output objects, which is
        // why it is not accounted for here. Similarly, the refundable portion of the storage
        // rebate is credited back to the gas coin, which is an output object. Computation costs
        // and the non-refundable storage fee are collected by the system rather than appearing
        // in any output object, so they are added to the output total here.
        total_output_sui += gas_summary.computation_cost + gas_summary.non_refundable_storage_fee;
        if let Some((epoch_fees, epoch_rebates)) = advance_epoch_gas_summary {
            total_input_sui += epoch_fees;
            total_output_sui += epoch_rebates;
        }
        if total_input_sui != total_output_sui {
            return Err(ExecutionError::invariant_violation(format!(
                "SUI conservation failed: input={}, output={}, \
                    this transaction either mints or burns SUI",
                total_input_sui, total_output_sui,
            )));
        }
        Ok(())
    }
}

impl ChildObjectResolver for TemporaryStore<'_> {
    fn read_child_object(
        &self,
        parent: &ObjectID,
        child: &ObjectID,
        child_version_upper_bound: SequenceNumber,
    ) -> SuiResult<Option<Object>> {
        let obj_opt = self.execution_results.written_objects.get(child);
        if obj_opt.is_some() {
            Ok(obj_opt.cloned())
        } else {
            let _scope = monitored_scope("Execution::read_child_object");
            self.store
                .read_child_object(parent, child, child_version_upper_bound)
        }
    }

    fn get_object_received_at_version(
        &self,
        owner: &ObjectID,
        receiving_object_id: &ObjectID,
        receive_object_at_version: SequenceNumber,
        epoch_id: EpochId,
    ) -> SuiResult<Option<Object>> {
        // It should never be possible to try to receive an object after deleting it or writing
        // it in the same transaction, since `Receiving` doesn't have copy.
        debug_assert!(
            !self
                .execution_results
                .written_objects
                .contains_key(receiving_object_id)
        );
        debug_assert!(
            !self
                .execution_results
                .deleted_object_ids
                .contains(receiving_object_id)
        );
        self.store.get_object_received_at_version(
            owner,
            receiving_object_id,
            receive_object_at_version,
            epoch_id,
        )
    }
}

/// Compares the owner and payload of an object.
/// This is used to detect illegal writes to non-exclusive write objects.
fn was_object_mutated(object: &Object, original: &Object) -> bool {
    let data_equal = match (&object.data, &original.data) {
        (Data::Move(a), Data::Move(b)) => a.contents_and_type_equal(b),
        // We don't have a use for package content-equality, so we remain as strict as
        // possible for now.
        (Data::Package(a), Data::Package(b)) => a == b,
        _ => false,
    };

    let owner_equal = match (&object.owner, &original.owner) {
        // We don't compare initial shared versions, because re-shared objects do not have the
        // correct initial shared version at this point in time, and this field is not something
        // that can be modified by a single transaction anyway.
        (Owner::Shared { .. }, Owner::Shared { .. }) => true,
        (
            Owner::ConsensusAddressOwner { owner: a, .. },
            Owner::ConsensusAddressOwner { owner: b, .. },
        ) => a == b,
        (Owner::AddressOwner(a), Owner::AddressOwner(b)) => a == b,
        (Owner::Immutable, Owner::Immutable) => true,
        (Owner::ObjectOwner(a), Owner::ObjectOwner(b)) => a == b,

        // Keep the left hand side of the match exhaustive to catch future
        // changes to Owner
        (Owner::AddressOwner(_), _)
        | (Owner::Immutable, _)
        | (Owner::ObjectOwner(_), _)
        | (Owner::Shared { .. }, _)
        | (Owner::ConsensusAddressOwner { .. }, _) => false,
    };

    !data_equal || !owner_equal
}

impl Storage for TemporaryStore<'_> {
    fn reset(&mut self) {
        self.drop_writes();
    }

    fn read_object(&self, id: &ObjectID) -> Option<&Object> {
        TemporaryStore::read_object(self, id)
    }

    /// Take execution results v2, and translate it back to be compatible with effects v1.
    fn record_execution_results(
        &mut self,
        results: ExecutionResults,
    ) -> Result<(), ExecutionError> {
        let ExecutionResults::V2(mut results) = results else {
            panic!("ExecutionResults::V2 expected in sui-execution v1 and above");
        };

        // for all non-exclusive write inputs, remove them from written objects
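        // A non-exclusive input may legitimately show up in `written_objects` (it was passed
        // by value or as `&mut`), but its contents and owner must be unchanged compared to the
        // original version; any real mutation or a deletion aborts with an execution error.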
        let mut to_remove = Vec::new();
        for (id, original) in &self.non_exclusive_input_original_versions {
            // Object must be present in `written_objects` and identical
            if results
                .written_objects
                .get(id)
                .map(|obj| was_object_mutated(obj, original))
                .unwrap_or(true)
            {
                return Err(ExecutionError::new_with_source(
                    ExecutionErrorKind::NonExclusiveWriteInputObjectModified { id: *id },
                    "Non-exclusive write input object has been modified or deleted",
                ));
            }
            to_remove.push(*id);
        }

        for id in to_remove {
            results.written_objects.remove(&id);
            results.modified_objects.remove(&id);
        }

        // It's important to merge instead of override results because it's
        // possible to execute PT more than once during tx execution.
        self.execution_results.merge_results(results);

        Ok(())
    }

    fn save_loaded_runtime_objects(
        &mut self,
        loaded_runtime_objects: BTreeMap<ObjectID, DynamicallyLoadedObjectMetadata>,
    ) {
        TemporaryStore::save_loaded_runtime_objects(self, loaded_runtime_objects)
    }

    fn save_wrapped_object_containers(
        &mut self,
        wrapped_object_containers: BTreeMap<ObjectID, ObjectID>,
    ) {
        TemporaryStore::save_wrapped_object_containers(self, wrapped_object_containers)
    }

    fn check_coin_deny_list(
        &self,
        receiving_funds_type_and_owners: BTreeMap<TypeTag, BTreeSet<SuiAddress>>,
    ) -> DenyListResult {
        let result = check_coin_deny_list_v2_during_execution(
            receiving_funds_type_and_owners,
            self.cur_epoch,
            self.store.as_object_store(),
        );
        // The denylist object is only loaded if there are regulated transfers.
        // Also, if we already have it in the input, there is no need to commit it again in the effects.
        if result.num_non_gas_coin_owners > 0
            && !self.input_objects.contains_key(&SUI_DENY_LIST_OBJECT_ID)
        {
            self.loaded_per_epoch_config_objects
                .write()
                .insert(SUI_DENY_LIST_OBJECT_ID);
        }
        result
    }

    fn record_generated_object_ids(&mut self, generated_ids: BTreeSet<ObjectID>) {
        TemporaryStore::save_generated_object_ids(self, generated_ids)
    }
}

impl BackingPackageStore for TemporaryStore<'_> {
    fn get_package_object(&self, package_id: &ObjectID) -> SuiResult<Option<PackageObject>> {
        // We first check the objects in the temporary store because, in non-production code paths,
        // it is possible to read packages that were just written in the same transaction.
        // This can happen for example when we run the expensive conservation checks, where we may
        // look into the types of each written object in the output, and some of them need the
        // newly written packages for type checking.
        // In the production path, though, this should never happen.
        if let Some(obj) = self.execution_results.written_objects.get(package_id) {
            Ok(Some(PackageObject::new(obj.clone())))
        } else {
            self.store.get_package_object(package_id).inspect(|obj| {
                // Track object but leave unchanged
                if let Some(v) = obj
                    && !self
                        .runtime_packages_loaded_from_db
                        .read()
                        .contains_key(package_id)
                {
                    // TODO: Can this lock ever block execution?
                    // TODO: Another way to avoid the cost of maintaining this map is to not
                    // enable it in normal runs, and if a fork is detected, rerun it with a flag
                    // turned on and start populating this field.
                    self.runtime_packages_loaded_from_db
                        .write()
                        .insert(*package_id, v.clone());
                }
            })
        }
    }
}

impl ParentSync for TemporaryStore<'_> {
    fn get_latest_parent_entry_ref_deprecated(&self, _object_id: ObjectID) -> Option<ObjectRef> {
        unreachable!("Never called in newer protocol versions")
    }
}