// File: sui_adapter_latest/temporary_store.rs

1// Copyright (c) Mysten Labs, Inc.
2// SPDX-License-Identifier: Apache-2.0
3
4use crate::gas_charger::GasCharger;
5use mysten_metrics::monitored_scope;
6use parking_lot::RwLock;
7use std::collections::{BTreeMap, BTreeSet, HashSet};
8use sui_protocol_config::ProtocolConfig;
9use sui_types::accumulator_event::AccumulatorEvent;
10use sui_types::accumulator_root::AccumulatorObjId;
11use sui_types::base_types::VersionDigest;
12use sui_types::committee::EpochId;
13use sui_types::deny_list_v2::check_coin_deny_list_v2_during_execution;
14use sui_types::effects::{
15    AccumulatorOperation, AccumulatorValue, AccumulatorWriteV1, TransactionEffects,
16    TransactionEvents,
17};
18use sui_types::error::ExecutionErrorKind;
19use sui_types::execution::{
20    DynamicallyLoadedObjectMetadata, ExecutionResults, ExecutionResultsV2, SharedInput,
21};
22use sui_types::execution_status::ExecutionStatus;
23use sui_types::inner_temporary_store::InnerTemporaryStore;
24use sui_types::layout_resolver::LayoutResolver;
25use sui_types::object::Data;
26use sui_types::storage::{BackingStore, DenyListResult, PackageObject};
27use sui_types::sui_system_state::{AdvanceEpochParams, get_sui_system_state_wrapper};
28use sui_types::{
29    SUI_DENY_LIST_OBJECT_ID,
30    base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress, TransactionDigest},
31    effects::EffectsObjectChange,
32    error::{ExecutionError, SuiResult},
33    gas::GasCostSummary,
34    object::Object,
35    object::Owner,
36    storage::{BackingPackageStore, ChildObjectResolver, ParentSync, Storage},
37    transaction::InputObjects,
38};
39use sui_types::{SUI_SYSTEM_STATE_OBJECT_ID, TypeTag, is_system_package};
40
pub struct TemporaryStore<'backing> {
    // The backing store for retrieving Move packages onchain.
    // When executing a Move call, the dependent packages are not going to be
    // in the input objects. They will be fetched from the backing store.
    // Also used for fetching the backing parent_sync to get the last known version for wrapped
    // objects
    store: &'backing dyn BackingStore,
    /// Digest of the transaction being executed; stamped as `previous_transaction`
    /// on every object this store writes.
    tx_digest: TransactionDigest,
    /// All input objects of the transaction, keyed by ID.
    input_objects: BTreeMap<ObjectID, Object>,

    /// Store the original versions of the non-exclusive write inputs, in order to detect
    /// mutations (which are illegal, but not prevented by the type system).
    non_exclusive_input_original_versions: BTreeMap<ObjectID, Object>,

    stream_ended_consensus_objects: BTreeMap<ObjectID, SequenceNumber /* start_version */>,
    /// The version to assign to all objects written by the transaction using this store.
    lamport_timestamp: SequenceNumber,
    /// Inputs that will be mutated by the transaction. Does not include NonExclusiveWrite inputs,
    /// which can be taken as `&mut T` but cannot be directly mutated.
    mutable_input_refs: BTreeMap<ObjectID, (VersionDigest, Owner)>,
    execution_results: ExecutionResultsV2,
    /// Objects that were loaded during execution (dynamic fields + received objects).
    loaded_runtime_objects: BTreeMap<ObjectID, DynamicallyLoadedObjectMetadata>,
    /// A map from wrapped object to its container. Used during expensive invariant checks.
    wrapped_object_containers: BTreeMap<ObjectID, ObjectID>,
    protocol_config: &'backing ProtocolConfig,

    /// Every package that was loaded from DB store during execution.
    /// These packages were not previously loaded into the temporary store.
    runtime_packages_loaded_from_db: RwLock<BTreeMap<ObjectID, PackageObject>>,

    /// The set of objects that we may receive during execution. Not guaranteed to receive all, or
    /// any of the objects referenced in this set.
    receiving_objects: Vec<ObjectRef>,

    /// The set of all generated object IDs from the object runtime during the transaction. This includes any
    /// created-and-then-deleted objects in addition to any `new_ids` which contains only the set
    /// of created (but not deleted) IDs in the transaction.
    generated_runtime_ids: BTreeSet<ObjectID>,

    // TODO: Now that we track epoch here, there are a few places we don't need to pass it around.
    /// The current epoch.
    cur_epoch: EpochId,

    /// The set of per-epoch config objects that were loaded during execution, and are not in the
    /// input objects. This allows us to commit them to the effects.
    loaded_per_epoch_config_objects: RwLock<BTreeSet<ObjectID>>,
}
89
90impl<'backing> TemporaryStore<'backing> {
91    /// Creates a new store associated with an authority store, and populates it with
92    /// initial objects.
93    pub fn new(
94        store: &'backing dyn BackingStore,
95        input_objects: InputObjects,
96        receiving_objects: Vec<ObjectRef>,
97        tx_digest: TransactionDigest,
98        protocol_config: &'backing ProtocolConfig,
99        cur_epoch: EpochId,
100    ) -> Self {
101        let mutable_input_refs = input_objects.exclusive_mutable_inputs();
102        let non_exclusive_input_original_versions = input_objects.non_exclusive_input_objects();
103
104        let lamport_timestamp = input_objects.lamport_timestamp(&receiving_objects);
105        let stream_ended_consensus_objects = input_objects.consensus_stream_ended_objects();
106        let objects = input_objects.into_object_map();
107        #[cfg(debug_assertions)]
108        {
109            // Ensure that input objects and receiving objects must not overlap.
110            assert!(
111                objects
112                    .keys()
113                    .collect::<HashSet<_>>()
114                    .intersection(
115                        &receiving_objects
116                            .iter()
117                            .map(|oref| &oref.0)
118                            .collect::<HashSet<_>>()
119                    )
120                    .next()
121                    .is_none()
122            );
123        }
124        Self {
125            store,
126            tx_digest,
127            input_objects: objects,
128            non_exclusive_input_original_versions,
129            stream_ended_consensus_objects,
130            lamport_timestamp,
131            mutable_input_refs,
132            execution_results: ExecutionResultsV2::default(),
133            protocol_config,
134            loaded_runtime_objects: BTreeMap::new(),
135            wrapped_object_containers: BTreeMap::new(),
136            runtime_packages_loaded_from_db: RwLock::new(BTreeMap::new()),
137            receiving_objects,
138            generated_runtime_ids: BTreeSet::new(),
139            cur_epoch,
140            loaded_per_epoch_config_objects: RwLock::new(BTreeSet::new()),
141        }
142    }
143
    // Helpers to access private fields
    /// Read-only view of the transaction's input objects, keyed by ID.
    pub fn objects(&self) -> &BTreeMap<ObjectID, Object> {
        &self.input_objects
    }
148
149    pub fn update_object_version_and_prev_tx(&mut self) {
150        self.execution_results.update_version_and_previous_tx(
151            self.lamport_timestamp,
152            self.tx_digest,
153            &self.input_objects,
154            self.protocol_config.reshare_at_same_initial_version(),
155        );
156
157        #[cfg(debug_assertions)]
158        {
159            self.check_invariants();
160        }
161    }
162
163    fn calculate_accumulator_running_max_withdraws(&self) -> BTreeMap<AccumulatorObjId, u128> {
164        let mut running_net_withdraws: BTreeMap<AccumulatorObjId, i128> = BTreeMap::new();
165        let mut running_max_withdraws: BTreeMap<AccumulatorObjId, u128> = BTreeMap::new();
166        for event in &self.execution_results.accumulator_events {
167            match &event.write.value {
168                AccumulatorValue::Integer(amount) => match event.write.operation {
169                    AccumulatorOperation::Split => {
170                        let entry = running_net_withdraws
171                            .entry(event.accumulator_obj)
172                            .or_default();
173                        *entry += *amount as i128;
174                        if *entry > 0 {
175                            let max_entry = running_max_withdraws
176                                .entry(event.accumulator_obj)
177                                .or_default();
178                            *max_entry = (*max_entry).max(*entry as u128);
179                        }
180                    }
181                    AccumulatorOperation::Merge => {
182                        let entry = running_net_withdraws
183                            .entry(event.accumulator_obj)
184                            .or_default();
185                        *entry -= *amount as i128;
186                    }
187                },
188                AccumulatorValue::IntegerTuple(_, _) | AccumulatorValue::EventDigest(_) => {}
189            }
190        }
191        running_max_withdraws
192    }
193
194    /// Ensure that there is one entry for each accumulator object in the accumulator events.
195    fn merge_accumulator_events(&mut self) {
196        self.execution_results.accumulator_events = self
197            .execution_results
198            .accumulator_events
199            .iter()
200            .fold(
201                BTreeMap::<AccumulatorObjId, Vec<AccumulatorWriteV1>>::new(),
202                |mut map, event| {
203                    map.entry(event.accumulator_obj)
204                        .or_default()
205                        .push(event.write.clone());
206                    map
207                },
208            )
209            .into_iter()
210            .map(|(obj_id, writes)| {
211                AccumulatorEvent::new(obj_id, AccumulatorWriteV1::merge(writes))
212            })
213            .collect();
214    }
215
216    /// Break up the structure and return its internal stores (objects, active_inputs, written, deleted)
217    pub fn into_inner(
218        self,
219        accumulator_running_max_withdraws: BTreeMap<AccumulatorObjId, u128>,
220    ) -> InnerTemporaryStore {
221        let results = self.execution_results;
222        InnerTemporaryStore {
223            input_objects: self.input_objects,
224            stream_ended_consensus_objects: self.stream_ended_consensus_objects,
225            mutable_inputs: self.mutable_input_refs,
226            written: results.written_objects,
227            events: TransactionEvents {
228                data: results.user_events,
229            },
230            accumulator_events: results.accumulator_events,
231            loaded_runtime_objects: self.loaded_runtime_objects,
232            runtime_packages_loaded_from_db: self.runtime_packages_loaded_from_db.into_inner(),
233            lamport_version: self.lamport_timestamp,
234            binary_config: self.protocol_config.binary_config(None),
235            accumulator_running_max_withdraws,
236        }
237    }
238
239    /// For every object from active_inputs (i.e. all mutable objects), if they are not
240    /// mutated during the transaction execution, force mutating them by incrementing the
241    /// sequence number. This is required to achieve safety.
242    pub(crate) fn ensure_active_inputs_mutated(&mut self) {
243        let mut to_be_updated = vec![];
244        // Note: we do not mutate input objects if they are non-exclusive write
245        for id in self.mutable_input_refs.keys() {
246            if !self.execution_results.modified_objects.contains(id) {
247                // We cannot update here but have to push to `to_be_updated` and update later
248                // because the for loop is holding a reference to `self`, and calling
249                // `self.mutate_input_object` requires a mutable reference to `self`.
250                to_be_updated.push(self.input_objects[id].clone());
251            }
252        }
253        for object in to_be_updated {
254            // The object must be mutated as it was present in the input objects
255            self.mutate_input_object(object.clone());
256        }
257    }
258
    /// Build the per-object change records for effects: one entry for every
    /// object that was created, deleted, modified, or written, plus one entry
    /// per accumulator object touched by accumulator events.
    fn get_object_changes(&self) -> BTreeMap<ObjectID, EffectsObjectChange> {
        let results = &self.execution_results;
        // Union of all IDs that appear in any change category.
        let all_ids = results
            .created_object_ids
            .iter()
            .chain(&results.deleted_object_ids)
            .chain(&results.modified_objects)
            .chain(results.written_objects.keys())
            .collect::<BTreeSet<_>>();
        all_ids
            .into_iter()
            .map(|id| {
                (
                    *id,
                    EffectsObjectChange::new(
                        // Pre-state (version/digest/owner) if the object was modified.
                        self.get_object_modified_at(id)
                            .map(|metadata| ((metadata.version, metadata.digest), metadata.owner)),
                        results.written_objects.get(id),
                        results.created_object_ids.contains(id),
                        results.deleted_object_ids.contains(id),
                    ),
                )
            })
            // Accumulator objects get their own change records derived from the
            // (already merged — one per object) accumulator writes.
            .chain(results.accumulator_events.iter().cloned().map(
                |AccumulatorEvent {
                     accumulator_obj,
                     write,
                 }| {
                    (
                        *accumulator_obj.inner(),
                        EffectsObjectChange::new_from_accumulator_write(write),
                    )
                },
            ))
            .collect()
    }
295
    /// Finalize execution: stamp versions/previous-tx on written objects, fold
    /// accumulator events, record dependencies for successfully received
    /// objects, and convert this store into an `InnerTemporaryStore` plus the
    /// `TransactionEffects` describing the outcome.
    pub fn into_effects(
        mut self,
        shared_object_refs: Vec<SharedInput>,
        transaction_digest: &TransactionDigest,
        mut transaction_dependencies: BTreeSet<TransactionDigest>,
        gas_cost_summary: GasCostSummary,
        status: ExecutionStatus,
        gas_charger: &mut GasCharger,
        epoch: EpochId,
    ) -> (InnerTemporaryStore, TransactionEffects) {
        self.update_object_version_and_prev_tx();
        // This must happen before merge_accumulator_events, which collapses the
        // per-write events that the running-max calculation iterates over.
        let accumulator_running_max_withdraws = self.calculate_accumulator_running_max_withdraws();
        self.merge_accumulator_events();

        // Regardless of execution status (including aborts), we insert the previous transaction
        // for any successfully received objects during the transaction.
        for (id, expected_version, expected_digest) in &self.receiving_objects {
            // If the receiving object is in the loaded runtime objects, then that means that it
            // was actually successfully loaded (so existed, and there was authenticated mutable
            // access to it). So we insert the previous transaction as a dependency.
            if let Some(obj_meta) = self.loaded_runtime_objects.get(id) {
                // Check that the expected version, digest, and owner match the loaded version,
                // digest, and owner. If they don't then don't register a dependency.
                // This is because this could be "spoofed" by loading a dynamic object field.
                let loaded_via_receive = obj_meta.version == *expected_version
                    && obj_meta.digest == *expected_digest
                    && obj_meta.owner.is_address_owned();
                if loaded_via_receive {
                    transaction_dependencies.insert(obj_meta.previous_transaction);
                }
            }
        }

        assert!(self.protocol_config.enable_effects_v2());

        // In the case of special transactions that don't require a gas object,
        // we don't really care about the effects to gas, just use the input for it.
        // Gas coins are guaranteed to be at least size 1 and if more than 1
        // the first coin is where all the others are merged.
        let gas_coin = gas_charger.gas_coin();

        let object_changes = self.get_object_changes();

        let lamport_version = self.lamport_timestamp;
        // TODO: Cleanup this clone. Potentially add unchanged_shared_objects directly to InnerTempStore.
        let loaded_per_epoch_config_objects = self.loaded_per_epoch_config_objects.read().clone();
        let inner = self.into_inner(accumulator_running_max_withdraws);

        let effects = TransactionEffects::new_from_execution_v2(
            status,
            epoch,
            gas_cost_summary,
            // TODO: Provide the list of read-only shared objects directly.
            shared_object_refs,
            loaded_per_epoch_config_objects,
            *transaction_digest,
            lamport_version,
            object_changes,
            gas_coin,
            // Events digest is only recorded when there are events.
            if inner.events.data.is_empty() {
                None
            } else {
                Some(inner.events.digest())
            },
            transaction_dependencies.into_iter().collect(),
        );

        (inner, effects)
    }
366
367    /// An internal check of the invariants (will only fire in debug)
368    #[cfg(debug_assertions)]
369    fn check_invariants(&self) {
370        // Check not both deleted and written
371        debug_assert!(
372            {
373                self.execution_results
374                    .written_objects
375                    .keys()
376                    .all(|id| !self.execution_results.deleted_object_ids.contains(id))
377            },
378            "Object both written and deleted."
379        );
380
381        // Check all mutable inputs are modified
382        debug_assert!(
383            {
384                self.mutable_input_refs
385                    .keys()
386                    .all(|id| self.execution_results.modified_objects.contains(id))
387            },
388            "Mutable input not modified."
389        );
390
391        debug_assert!(
392            {
393                self.execution_results
394                    .written_objects
395                    .values()
396                    .all(|obj| obj.previous_transaction == self.tx_digest)
397            },
398            "Object previous transaction not properly set",
399        );
400    }
401
402    /// Mutate a mutable input object. This is used to mutate input objects outside of PT execution.
403    pub fn mutate_input_object(&mut self, object: Object) {
404        let id = object.id();
405        debug_assert!(self.input_objects.contains_key(&id));
406        debug_assert!(!object.is_immutable());
407        self.execution_results.modified_objects.insert(id);
408        self.execution_results.written_objects.insert(id, object);
409    }
410
411    /// Mutate a child object outside of PT. This should be used extremely rarely.
412    /// Currently it's only used by advance_epoch_safe_mode because it's all native
413    /// without PT. This should almost never be used otherwise.
414    pub fn mutate_child_object(&mut self, old_object: Object, new_object: Object) {
415        let id = new_object.id();
416        let old_ref = old_object.compute_object_reference();
417        debug_assert_eq!(old_ref.0, id);
418        self.loaded_runtime_objects.insert(
419            id,
420            DynamicallyLoadedObjectMetadata {
421                version: old_ref.1,
422                digest: old_ref.2,
423                owner: old_object.owner.clone(),
424                storage_rebate: old_object.storage_rebate,
425                previous_transaction: old_object.previous_transaction,
426            },
427        );
428        self.execution_results.modified_objects.insert(id);
429        self.execution_results
430            .written_objects
431            .insert(id, new_object);
432    }
433
434    /// Upgrade system package during epoch change. This requires special treatment
435    /// since the system package to be upgraded is not in the input objects.
436    /// We could probably fix above to make it less special.
437    pub fn upgrade_system_package(&mut self, package: Object) {
438        let id = package.id();
439        assert!(package.is_package() && is_system_package(id));
440        self.execution_results.modified_objects.insert(id);
441        self.execution_results.written_objects.insert(id, package);
442    }
443
444    /// Crate a new objcet. This is used to create objects outside of PT execution.
445    pub fn create_object(&mut self, object: Object) {
446        // Created mutable objects' versions are set to the store's lamport timestamp when it is
447        // committed to effects. Creating an object at a non-zero version risks violating the
448        // lamport timestamp invariant (that a transaction's lamport timestamp is strictly greater
449        // than all versions witnessed by the transaction).
450        debug_assert!(
451            object.is_immutable() || object.version() == SequenceNumber::MIN,
452            "Created mutable objects should not have a version set",
453        );
454        let id = object.id();
455        self.execution_results.created_object_ids.insert(id);
456        self.execution_results.written_objects.insert(id, object);
457    }
458
459    /// Delete a mutable input object. This is used to delete input objects outside of PT execution.
460    pub fn delete_input_object(&mut self, id: &ObjectID) {
461        // there should be no deletion after write
462        debug_assert!(!self.execution_results.written_objects.contains_key(id));
463        debug_assert!(self.input_objects.contains_key(id));
464        self.execution_results.modified_objects.insert(*id);
465        self.execution_results.deleted_object_ids.insert(*id);
466    }
467
    /// Discard all pending writes accumulated so far (delegates to the
    /// execution results). Used when execution must be rolled back.
    pub fn drop_writes(&mut self) {
        self.execution_results.drop_writes();
    }
471
472    pub fn read_object(&self, id: &ObjectID) -> Option<&Object> {
473        // there should be no read after delete
474        debug_assert!(!self.execution_results.deleted_object_ids.contains(id));
475        self.execution_results
476            .written_objects
477            .get(id)
478            .or_else(|| self.input_objects.get(id))
479    }
480
481    pub fn save_loaded_runtime_objects(
482        &mut self,
483        loaded_runtime_objects: BTreeMap<ObjectID, DynamicallyLoadedObjectMetadata>,
484    ) {
485        #[cfg(debug_assertions)]
486        {
487            for (id, v1) in &loaded_runtime_objects {
488                if let Some(v2) = self.loaded_runtime_objects.get(id) {
489                    assert_eq!(v1, v2);
490                }
491            }
492            for (id, v1) in &self.loaded_runtime_objects {
493                if let Some(v2) = loaded_runtime_objects.get(id) {
494                    assert_eq!(v1, v2);
495                }
496            }
497        }
498        // Merge the two maps because we may be calling the execution engine more than once
499        // (e.g. in advance epoch transaction, where we may be publishing a new system package).
500        self.loaded_runtime_objects.extend(loaded_runtime_objects);
501    }
502
503    pub fn save_wrapped_object_containers(
504        &mut self,
505        wrapped_object_containers: BTreeMap<ObjectID, ObjectID>,
506    ) {
507        #[cfg(debug_assertions)]
508        {
509            for (id, container1) in &wrapped_object_containers {
510                if let Some(container2) = self.wrapped_object_containers.get(id) {
511                    assert_eq!(container1, container2);
512                }
513            }
514            for (id, container1) in &self.wrapped_object_containers {
515                if let Some(container2) = wrapped_object_containers.get(id) {
516                    assert_eq!(container1, container2);
517                }
518            }
519        }
520        // Merge the two maps because we may be calling the execution engine more than once
521        // (e.g. in advance epoch transaction, where we may be publishing a new system package).
522        self.wrapped_object_containers
523            .extend(wrapped_object_containers);
524    }
525
526    pub fn save_generated_object_ids(&mut self, generated_ids: BTreeSet<ObjectID>) {
527        #[cfg(debug_assertions)]
528        {
529            for id in &self.generated_runtime_ids {
530                assert!(!generated_ids.contains(id))
531            }
532            for id in &generated_ids {
533                assert!(!self.generated_runtime_ids.contains(id));
534            }
535        }
536        self.generated_runtime_ids.extend(generated_ids);
537    }
538
    /// Upper-bound estimate of the serialized effects size for this
    /// transaction, based on the counts of written, modified, and input objects.
    pub fn estimate_effects_size_upperbound(&self) -> usize {
        TransactionEffects::estimate_effects_size_upperbound_v2(
            self.execution_results.written_objects.len(),
            self.execution_results.modified_objects.len(),
            self.input_objects.len(),
        )
    }
546
547    pub fn written_objects_size(&self) -> usize {
548        self.execution_results
549            .written_objects
550            .values()
551            .fold(0, |sum, obj| sum + obj.object_size_for_gas_metering())
552    }
553
554    /// If there are unmetered storage rebate (due to system transaction), we put them into
555    /// the storage rebate of 0x5 object.
556    /// TODO: This will not work for potential future new system transactions if 0x5 is not in the input.
557    /// We should fix this.
558    pub fn conserve_unmetered_storage_rebate(&mut self, unmetered_storage_rebate: u64) {
559        if unmetered_storage_rebate == 0 {
560            // If unmetered_storage_rebate is 0, we are most likely executing the genesis transaction.
561            // And in that case we cannot mutate the 0x5 object because it's newly created.
562            // And there is no storage rebate that needs distribution anyway.
563            return;
564        }
565        tracing::debug!(
566            "Amount of unmetered storage rebate from system tx: {:?}",
567            unmetered_storage_rebate
568        );
569        let mut system_state_wrapper = self
570            .read_object(&SUI_SYSTEM_STATE_OBJECT_ID)
571            .expect("0x5 object must be mutated in system tx with unmetered storage rebate")
572            .clone();
573        // In unmetered execution, storage_rebate field of mutated object must be 0.
574        // If not, we would be dropping SUI on the floor by overriding it.
575        assert_eq!(system_state_wrapper.storage_rebate, 0);
576        system_state_wrapper.storage_rebate = unmetered_storage_rebate;
577        self.mutate_input_object(system_state_wrapper);
578    }
579
    /// Add an accumulator event to the execution results.
    /// Events are merged per accumulator object later, in `merge_accumulator_events`.
    pub fn add_accumulator_event(&mut self, event: AccumulatorEvent) {
        self.execution_results.accumulator_events.push(event);
    }
584
    /// Given an object ID, if it's not modified, returns None.
    /// Otherwise returns its metadata, including version, digest, owner and storage rebate.
    /// A modified object must be either a mutable input, or a loaded child object.
    /// The only exception is when we upgrade system packages, in which case the upgraded
    /// system packages are not part of input, but are modified.
    fn get_object_modified_at(
        &self,
        object_id: &ObjectID,
    ) -> Option<DynamicallyLoadedObjectMetadata> {
        if self.execution_results.modified_objects.contains(object_id) {
            Some(
                // Lookup order: mutable input -> loaded runtime (child) object
                // -> upgraded system package (fetched from the backing store).
                self.mutable_input_refs
                    .get(object_id)
                    .map(
                        |((version, digest), owner)| DynamicallyLoadedObjectMetadata {
                            version: *version,
                            digest: *digest,
                            owner: owner.clone(),
                            // It's guaranteed that a mutable input object is an input object.
                            storage_rebate: self.input_objects[object_id].storage_rebate,
                            previous_transaction: self.input_objects[object_id]
                                .previous_transaction,
                        },
                    )
                    .or_else(|| self.loaded_runtime_objects.get(object_id).cloned())
                    .unwrap_or_else(|| {
                        // Neither an input nor a loaded child: must be a system
                        // package being upgraded, so its prior version exists in
                        // the backing store (both unwraps rely on that).
                        debug_assert!(is_system_package(*object_id));
                        let package_obj =
                            self.store.get_package_object(object_id).unwrap().unwrap();
                        let obj = package_obj.object();
                        DynamicallyLoadedObjectMetadata {
                            version: obj.version(),
                            digest: obj.digest(),
                            owner: obj.owner.clone(),
                            storage_rebate: obj.storage_rebate,
                            previous_transaction: obj.previous_transaction,
                        }
                    }),
            )
        } else {
            None
        }
    }
628}
629
630impl TemporaryStore<'_> {
631    // check that every object read is owned directly or indirectly by sender, sponsor,
632    // or a shared object input
633    pub fn check_ownership_invariants(
634        &self,
635        sender: &SuiAddress,
636        sponsor: &Option<SuiAddress>,
637        gas_charger: &mut GasCharger,
638        mutable_inputs: &HashSet<ObjectID>,
639        is_epoch_change: bool,
640    ) -> SuiResult<()> {
641        let gas_objs: HashSet<&ObjectID> = gas_charger.gas_coins().map(|g| &g.0).collect();
642        let gas_owner = sponsor.as_ref().unwrap_or(sender);
643
644        // mark input objects as authenticated
645        let mut authenticated_for_mutation: HashSet<_> = self
646            .input_objects
647            .iter()
648            .filter_map(|(id, obj)| {
649                match &obj.owner {
650                    Owner::AddressOwner(a) => {
651                        if gas_objs.contains(id) {
652                            // gas object must be owned by sender or sponsor
653                            assert!(
654                                a == gas_owner,
655                                "Gas object must be owned by sender or sponsor"
656                            );
657                        } else {
658                            assert!(sender == a, "Input object must be owned by sender");
659                        }
660                        Some(id)
661                    }
662                    Owner::Shared { .. } | Owner::ConsensusAddressOwner { .. } => Some(id),
663                    Owner::Immutable => {
664                        // object is authenticated, but it cannot own other objects,
665                        // so we should not add it to `authenticated_objs`
666                        // However, we would definitely want to add immutable objects
667                        // to the set of authenticated roots if we were doing runtime
668                        // checks inside the VM instead of after-the-fact in the temporary
669                        // store. Here, we choose not to add them because this will catch a
670                        // bug where we mutate or delete an object that belongs to an immutable
671                        // object (though it will show up somewhat opaquely as an authentication
672                        // failure), whereas adding the immutable object to the roots will prevent
673                        // us from catching this.
674                        None
675                    }
676                    Owner::ObjectOwner(_parent) => {
677                        unreachable!(
678                            "Input objects must be address owned, shared, consensus, or immutable"
679                        )
680                    }
681                }
682            })
683            .filter(|id| {
684                // remove any non-mutable inputs. This will remove deleted or readonly shared
685                // objects
686                mutable_inputs.contains(id)
687            })
688            .copied()
689            // Add any object IDs generated in the object runtime during execution to the
690            // authenticated set (i.e., new (non-package) objects, and possibly ephemeral UIDs).
691            .chain(self.generated_runtime_ids.iter().copied())
692            .collect();
693
694        // Add sender and sponsor (if present) to authenticated set
695        authenticated_for_mutation.insert((*sender).into());
696        if let Some(sponsor) = sponsor {
697            authenticated_for_mutation.insert((*sponsor).into());
698        }
699
700        // check all modified objects are authenticated
701        let mut objects_to_authenticate = self
702            .execution_results
703            .modified_objects
704            .iter()
705            .copied()
706            .collect::<Vec<_>>();
707
708        while let Some(to_authenticate) = objects_to_authenticate.pop() {
709            if authenticated_for_mutation.contains(&to_authenticate) {
710                // object has already been authenticated
711                continue;
712            }
713
714            let parent = if let Some(container_id) =
715                self.wrapped_object_containers.get(&to_authenticate)
716            {
717                // It's a wrapped object, so check that the container is authenticated
718                *container_id
719            } else {
720                // It's non-wrapped, so check the owner -- we can load the object from the
721                // store.
722                let Some(old_obj) = self.store.get_object(&to_authenticate) else {
723                    panic!(
724                        "Failed to load object {to_authenticate:?}.\n \
725                         If it cannot be loaded, we would expect it to be in the wrapped object map: {:#?}",
726                        &self.wrapped_object_containers
727                    )
728                };
729
730                match &old_obj.owner {
731                    // We mutated a dynamic field, we can continue to trace this back to verify
732                    // proper ownership.
733                    Owner::ObjectOwner(parent) => ObjectID::from(*parent),
734                    // We mutated an address owned or sequenced address owned object -- one of two cases apply:
735                    // 1) the object is owned by an object or address in the authenticated set,
736                    // 2) the object is owned by some other address, in which case we should
737                    //    continue to trace this back.
738                    Owner::AddressOwner(parent)
739                    | Owner::ConsensusAddressOwner { owner: parent, .. } => {
740                        // For Receiving<_> objects, the address owner is actually an object.
741                        // If it was actually an address, we should have caught it as an input and
742                        // it would already have been in authenticated_for_mutation
743                        ObjectID::from(*parent)
744                    }
745                    // We mutated a shared object -- we checked if this object was in the
746                    // authenticated set at the top of this loop and it wasn't so this is a failure.
747                    owner @ Owner::Shared { .. } => {
748                        panic!(
749                            "Unauthenticated root at {to_authenticate:?} with owner {owner:?}\n\
750                             Potentially covering objects in: {authenticated_for_mutation:#?}"
751                        );
752                    }
753                    Owner::Immutable => {
754                        assert!(
755                            is_epoch_change,
756                            "Immutable objects cannot be written, except for \
757                             Sui Framework/Move stdlib upgrades at epoch change boundaries"
758                        );
759                        // Note: this assumes that the only immutable objects an epoch change
760                        // tx can update are system packages,
761                        // but in principle we could allow others.
762                        assert!(
763                            is_system_package(to_authenticate),
764                            "Only system packages can be upgraded"
765                        );
766                        continue;
767                    }
768                }
769            };
770
771            // we now assume the object is authenticated and check the parent
772            authenticated_for_mutation.insert(to_authenticate);
773            objects_to_authenticate.push(parent);
774        }
775        Ok(())
776    }
777}
778
impl TemporaryStore<'_> {
    /// Track storage gas for each mutable input object (including the gas coin)
    /// and each created object. Compute storage refunds for each deleted object.
    /// Will *not* charge anything, gas status keeps track of storage cost and rebate.
    /// All objects will be updated with their new (current) storage rebate/cost.
    /// `SuiGasStatus` `storage_rebate` and `storage_gas_units` track the transaction
    /// overall storage rebate and cost.
    pub(crate) fn collect_storage_and_rebate(&mut self, gas_charger: &mut GasCharger) {
        // Use two loops because we cannot mut iterate written while calling get_object_modified_at.
        let old_storage_rebates: Vec<_> = self
            .execution_results
            .written_objects
            .keys()
            .map(|object_id| {
                self.get_object_modified_at(object_id)
                    .map(|metadata| metadata.storage_rebate)
                    // No prior metadata: the object is newly created, so its
                    // previous storage rebate is 0.
                    .unwrap_or_default()
            })
            .collect();
        // `zip` relies on `written_objects` yielding keys and values in the same
        // order across both loops.
        for (object, old_storage_rebate) in self
            .execution_results
            .written_objects
            .values_mut()
            .zip(old_storage_rebates)
        {
            // new object size
            let new_object_size = object.object_size_for_gas_metering();
            // track changes and compute the new object `storage_rebate`
            let new_storage_rebate = gas_charger.track_storage_mutation(
                object.id(),
                new_object_size,
                old_storage_rebate,
            );
            object.storage_rebate = new_storage_rebate;
        }

        self.collect_rebate(gas_charger);
    }

    /// Track storage rebates for modified objects that have no written output
    /// (i.e. deleted or wrapped objects): their new size is 0, releasing the
    /// entire previous `storage_rebate` to the gas charger.
    pub(crate) fn collect_rebate(&self, gas_charger: &mut GasCharger) {
        for object_id in &self.execution_results.modified_objects {
            if self
                .execution_results
                .written_objects
                .contains_key(object_id)
            {
                // Re-written objects are handled by `collect_storage_and_rebate`.
                continue;
            }
            // get and track the deleted object `storage_rebate`
            let storage_rebate = self
                .get_object_modified_at(object_id)
                // Unwrap is safe because this loop iterates through all modified objects.
                .unwrap()
                .storage_rebate;
            gas_charger.track_storage_mutation(*object_id, 0, storage_rebate);
        }
    }

    /// Sanity-check the internal consistency of the accumulated execution
    /// results: a created object ID must not also be deleted or modified, and
    /// every modified object must have a known provenance (mutable input,
    /// loaded child object, or system package).
    pub fn check_execution_results_consistency(&self) -> Result<(), ExecutionError> {
        assert_invariant!(
            self.execution_results
                .created_object_ids
                .iter()
                .all(|id| !self.execution_results.deleted_object_ids.contains(id)
                    && !self.execution_results.modified_objects.contains(id)),
            "Created object IDs cannot also be deleted or modified"
        );
        assert_invariant!(
            self.execution_results.modified_objects.iter().all(|id| {
                self.mutable_input_refs.contains_key(id)
                    || self.loaded_runtime_objects.contains_key(id)
                    || is_system_package(*id)
            }),
            "A modified object must be either a mutable input, a loaded child object, or a system package"
        );
        Ok(())
    }
}
857//==============================================================================
858// Charge gas current - end
859//==============================================================================
860
impl TemporaryStore<'_> {
    /// Advance the epoch in safe mode: load the system state wrapper from the
    /// backing store, have it compute the old/new versions of the inner system
    /// state object, and record the change as a child-object mutation in this
    /// temporary store.
    ///
    /// Panics if the system state wrapper object does not exist, which would
    /// be a broken chain invariant.
    pub fn advance_epoch_safe_mode(
        &mut self,
        params: &AdvanceEpochParams,
        protocol_config: &ProtocolConfig,
    ) {
        let wrapper = get_sui_system_state_wrapper(self.store.as_object_store())
            .expect("System state wrapper object must exist");
        let (old_object, new_object) =
            wrapper.advance_epoch_safe_mode(params, self.store.as_object_store(), protocol_config);
        self.mutate_child_object(old_object, new_object);
    }
}
874
/// Per-object record used by the SUI conservation checks: the object ID, its
/// pre-transaction metadata (`None` if the object was newly created), and its
/// post-transaction contents (`None` if it was deleted or wrapped).
type ModifiedObjectInfo<'a> = (
    ObjectID,
    // old object metadata, including version, digest, owner, and storage rebate.
    Option<DynamicallyLoadedObjectMetadata>,
    Option<&'a Object>,
);
881
882impl TemporaryStore<'_> {
883    fn get_input_sui(
884        &self,
885        id: &ObjectID,
886        expected_version: SequenceNumber,
887        layout_resolver: &mut impl LayoutResolver,
888    ) -> Result<u64, ExecutionError> {
889        if let Some(obj) = self.input_objects.get(id) {
890            // the assumption here is that if it is in the input objects must be the right one
891            if obj.version() != expected_version {
892                invariant_violation!(
893                    "Version mismatching when resolving input object to check conservation--\
894                     expected {}, got {}",
895                    expected_version,
896                    obj.version(),
897                );
898            }
899            obj.get_total_sui(layout_resolver).map_err(|e| {
900                make_invariant_violation!(
901                    "Failed looking up input SUI in SUI conservation checking for input with \
902                         type {:?}: {e:#?}",
903                    obj.struct_tag(),
904                )
905            })
906        } else {
907            // not in input objects, must be a dynamic field
908            let Some(obj) = self.store.get_object_by_key(id, expected_version) else {
909                invariant_violation!(
910                    "Failed looking up dynamic field {id} in SUI conservation checking"
911                );
912            };
913            obj.get_total_sui(layout_resolver).map_err(|e| {
914                make_invariant_violation!(
915                    "Failed looking up input SUI in SUI conservation checking for type \
916                         {:?}: {e:#?}",
917                    obj.struct_tag(),
918                )
919            })
920        }
921    }
922
923    /// Return the list of all modified objects, for each object, returns
924    /// - Object ID,
925    /// - Input: If the object existed prior to this transaction, include their version and storage_rebate,
926    /// - Output: If a new version of the object is written, include the new object.
927    fn get_modified_objects(&self) -> Vec<ModifiedObjectInfo<'_>> {
928        self.execution_results
929            .modified_objects
930            .iter()
931            .map(|id| {
932                let metadata = self.get_object_modified_at(id);
933                let output = self.execution_results.written_objects.get(id);
934                (*id, metadata, output)
935            })
936            .chain(
937                self.execution_results
938                    .written_objects
939                    .iter()
940                    .filter_map(|(id, object)| {
941                        if self.execution_results.modified_objects.contains(id) {
942                            None
943                        } else {
944                            Some((*id, None, Some(object)))
945                        }
946                    }),
947            )
948            .collect()
949    }
950
951    /// Check that this transaction neither creates nor destroys SUI. This should hold for all txes
952    /// except the epoch change tx, which mints staking rewards equal to the gas fees burned in the
953    /// previous epoch.  Specifically, this checks two key invariants about storage
954    /// fees and storage rebate:
955    ///
956    /// 1. all SUI in storage rebate fields of input objects should flow either to the transaction
957    ///    storage rebate, or the transaction non-refundable storage rebate
958    /// 2. all SUI charged for storage should flow into the storage rebate field of some output
959    ///    object
960    ///
961    /// This function is intended to be called *after* we have charged for
962    /// gas + applied the storage rebate to the gas object, but *before* we
963    /// have updated object versions.
964    pub fn check_sui_conserved(
965        &self,
966        simple_conservation_checks: bool,
967        gas_summary: &GasCostSummary,
968    ) -> Result<(), ExecutionError> {
969        if !simple_conservation_checks {
970            return Ok(());
971        }
972        // total amount of SUI in storage rebate of input objects
973        let mut total_input_rebate = 0;
974        // total amount of SUI in storage rebate of output objects
975        let mut total_output_rebate = 0;
976        for (_, input, output) in self.get_modified_objects() {
977            if let Some(input) = input {
978                total_input_rebate += input.storage_rebate;
979            }
980            if let Some(object) = output {
981                total_output_rebate += object.storage_rebate;
982            }
983        }
984
985        if gas_summary.storage_cost == 0 {
986            // this condition is usually true when the transaction went OOG and no
987            // gas is left for storage charges.
988            // The storage cost has to be there at least for the gas coin which
989            // will not be deleted even when going to 0.
990            // However if the storage cost is 0 and if there is any object touched
991            // or deleted the value in input must be equal to the output plus rebate and
992            // non refundable.
993            // Rebate and non refundable will be positive when there are object deleted
994            // (gas smashing being the primary and possibly only example).
995            // A more typical condition is for all storage charges in summary to be 0 and
996            // then input and output must be the same value
997            if total_input_rebate
998                != total_output_rebate
999                    + gas_summary.storage_rebate
1000                    + gas_summary.non_refundable_storage_fee
1001            {
1002                return Err(ExecutionError::invariant_violation(format!(
1003                    "SUI conservation failed -- no storage charges in gas summary \
1004                        and total storage input rebate {} not equal  \
1005                        to total storage output rebate {}",
1006                    total_input_rebate, total_output_rebate,
1007                )));
1008            }
1009        } else {
1010            // all SUI in storage rebate fields of input objects should flow either to
1011            // the transaction storage rebate, or the non-refundable storage rebate pool
1012            if total_input_rebate
1013                != gas_summary.storage_rebate + gas_summary.non_refundable_storage_fee
1014            {
1015                return Err(ExecutionError::invariant_violation(format!(
1016                    "SUI conservation failed -- {} SUI in storage rebate field of input objects, \
1017                        {} SUI in tx storage rebate or tx non-refundable storage rebate",
1018                    total_input_rebate, gas_summary.non_refundable_storage_fee,
1019                )));
1020            }
1021
1022            // all SUI charged for storage should flow into the storage rebate field
1023            // of some output object
1024            if gas_summary.storage_cost != total_output_rebate {
1025                return Err(ExecutionError::invariant_violation(format!(
1026                    "SUI conservation failed -- {} SUI charged for storage, \
1027                        {} SUI in storage rebate field of output objects",
1028                    gas_summary.storage_cost, total_output_rebate
1029                )));
1030            }
1031        }
1032        Ok(())
1033    }
1034
1035    /// Check that this transaction neither creates nor destroys SUI.
1036    /// This more expensive check will check a third invariant on top of the 2 performed
1037    /// by `check_sui_conserved` above:
1038    ///
1039    /// * all SUI in input objects (including coins etc in the Move part of an object) should flow
1040    ///   either to an output object, or be burned as part of computation fees or non-refundable
1041    ///   storage rebate
1042    ///
1043    /// This function is intended to be called *after* we have charged for gas + applied the
1044    /// storage rebate to the gas object, but *before* we have updated object versions. The
1045    /// advance epoch transaction would mint `epoch_fees` amount of SUI, and burn `epoch_rebates`
1046    /// amount of SUI. We need these information for this check.
1047    pub fn check_sui_conserved_expensive(
1048        &self,
1049        gas_summary: &GasCostSummary,
1050        advance_epoch_gas_summary: Option<(u64, u64)>,
1051        layout_resolver: &mut impl LayoutResolver,
1052    ) -> Result<(), ExecutionError> {
1053        // total amount of SUI in input objects, including both coins and storage rebates
1054        let mut total_input_sui = 0;
1055        // total amount of SUI in output objects, including both coins and storage rebates
1056        let mut total_output_sui = 0;
1057
1058        // settlement input/output sui is used by the settlement transactions to account for
1059        // Sui that has been gathered from the accumulator writes of transactions which it is
1060        // settling.
1061        total_input_sui += self.execution_results.settlement_input_sui;
1062        total_output_sui += self.execution_results.settlement_output_sui;
1063
1064        for (id, input, output) in self.get_modified_objects() {
1065            if let Some(input) = input {
1066                total_input_sui += self.get_input_sui(&id, input.version, layout_resolver)?;
1067            }
1068            if let Some(object) = output {
1069                total_output_sui += object.get_total_sui(layout_resolver).map_err(|e| {
1070                    make_invariant_violation!(
1071                        "Failed looking up output SUI in SUI conservation checking for \
1072                         mutated type {:?}: {e:#?}",
1073                        object.struct_tag(),
1074                    )
1075                })?;
1076            }
1077        }
1078
1079        for event in &self.execution_results.accumulator_events {
1080            let (input, output) = event.total_sui_in_event();
1081            total_input_sui += input;
1082            total_output_sui += output;
1083        }
1084
1085        // note: storage_cost flows into the storage_rebate field of the output objects, which is
1086        // why it is not accounted for here.
1087        // similarly, all of the storage_rebate *except* the storage_fund_rebate_inflow
1088        // gets credited to the gas coin both computation costs and storage rebate inflow are
1089        total_output_sui += gas_summary.computation_cost + gas_summary.non_refundable_storage_fee;
1090        if let Some((epoch_fees, epoch_rebates)) = advance_epoch_gas_summary {
1091            total_input_sui += epoch_fees;
1092            total_output_sui += epoch_rebates;
1093        }
1094        if total_input_sui != total_output_sui {
1095            return Err(ExecutionError::invariant_violation(format!(
1096                "SUI conservation failed: input={}, output={}, \
1097                    this transaction either mints or burns SUI",
1098                total_input_sui, total_output_sui,
1099            )));
1100        }
1101        Ok(())
1102    }
1103}
1104
1105impl ChildObjectResolver for TemporaryStore<'_> {
1106    fn read_child_object(
1107        &self,
1108        parent: &ObjectID,
1109        child: &ObjectID,
1110        child_version_upper_bound: SequenceNumber,
1111    ) -> SuiResult<Option<Object>> {
1112        let obj_opt = self.execution_results.written_objects.get(child);
1113        if obj_opt.is_some() {
1114            Ok(obj_opt.cloned())
1115        } else {
1116            let _scope = monitored_scope("Execution::read_child_object");
1117            self.store
1118                .read_child_object(parent, child, child_version_upper_bound)
1119        }
1120    }
1121
1122    fn get_object_received_at_version(
1123        &self,
1124        owner: &ObjectID,
1125        receiving_object_id: &ObjectID,
1126        receive_object_at_version: SequenceNumber,
1127        epoch_id: EpochId,
1128    ) -> SuiResult<Option<Object>> {
1129        // You should never be able to try and receive an object after deleting it or writing it in the same
1130        // transaction since `Receiving` doesn't have copy.
1131        debug_assert!(
1132            !self
1133                .execution_results
1134                .written_objects
1135                .contains_key(receiving_object_id)
1136        );
1137        debug_assert!(
1138            !self
1139                .execution_results
1140                .deleted_object_ids
1141                .contains(receiving_object_id)
1142        );
1143        self.store.get_object_received_at_version(
1144            owner,
1145            receiving_object_id,
1146            receive_object_at_version,
1147            epoch_id,
1148        )
1149    }
1150}
1151
1152/// Compares the owner and payload of an object.
1153/// This is used to detect illegal writes to non-exclusive write objects.
1154fn was_object_mutated(object: &Object, original: &Object) -> bool {
1155    let data_equal = match (&object.data, &original.data) {
1156        (Data::Move(a), Data::Move(b)) => a.contents_and_type_equal(b),
1157        // We don't have a use for package content-equality, so we remain as strict as
1158        // possible for now.
1159        (Data::Package(a), Data::Package(b)) => a == b,
1160        _ => false,
1161    };
1162
1163    let owner_equal = match (&object.owner, &original.owner) {
1164        // We don't compare initial shared versions, because re-shared objects do not have the
1165        // correct initial shared version at this point in time, and this field is not something
1166        // that can be modified by a single transaction anyway.
1167        (Owner::Shared { .. }, Owner::Shared { .. }) => true,
1168        (
1169            Owner::ConsensusAddressOwner { owner: a, .. },
1170            Owner::ConsensusAddressOwner { owner: b, .. },
1171        ) => a == b,
1172        (Owner::AddressOwner(a), Owner::AddressOwner(b)) => a == b,
1173        (Owner::Immutable, Owner::Immutable) => true,
1174        (Owner::ObjectOwner(a), Owner::ObjectOwner(b)) => a == b,
1175
1176        // Keep the left hand side of the match exhaustive to catch future
1177        // changes to Owner
1178        (Owner::AddressOwner(_), _)
1179        | (Owner::Immutable, _)
1180        | (Owner::ObjectOwner(_), _)
1181        | (Owner::Shared { .. }, _)
1182        | (Owner::ConsensusAddressOwner { .. }, _) => false,
1183    };
1184
1185    !data_equal || !owner_equal
1186}
1187
impl Storage for TemporaryStore<'_> {
    /// Discard all pending writes, returning the store to its initial state.
    fn reset(&mut self) {
        self.drop_writes();
    }

    fn read_object(&self, id: &ObjectID) -> Option<&Object> {
        TemporaryStore::read_object(self, id)
    }

    /// Take execution results v2, and translate it back to be compatible with effects v1.
    fn record_execution_results(
        &mut self,
        results: ExecutionResults,
    ) -> Result<(), ExecutionError> {
        let ExecutionResults::V2(mut results) = results else {
            panic!("ExecutionResults::V2 expected in sui-execution v1 and above");
        };

        // for all non-exclusive write inputs, remove them from written objects
        let mut to_remove = Vec::new();
        for (id, original) in &self.non_exclusive_input_original_versions {
            // Object must be present in `written_objects` and identical
            if results
                .written_objects
                .get(id)
                .map(|obj| was_object_mutated(obj, original))
                // Absent from written_objects is also a violation (e.g. the
                // object was deleted).
                .unwrap_or(true)
            {
                return Err(ExecutionError::new_with_source(
                    ExecutionErrorKind::NonExclusiveWriteInputObjectModified { id: *id },
                    "Non-exclusive write input object has been modified or deleted",
                ));
            }
            to_remove.push(*id);
        }

        // Unmutated non-exclusive inputs are dropped from the results so they
        // don't surface as mutations in the effects.
        for id in to_remove {
            results.written_objects.remove(&id);
            results.modified_objects.remove(&id);
        }

        // It's important to merge instead of override results because it's
        // possible to execute PT more than once during tx execution.
        self.execution_results.merge_results(results);

        Ok(())
    }

    fn save_loaded_runtime_objects(
        &mut self,
        loaded_runtime_objects: BTreeMap<ObjectID, DynamicallyLoadedObjectMetadata>,
    ) {
        TemporaryStore::save_loaded_runtime_objects(self, loaded_runtime_objects)
    }

    fn save_wrapped_object_containers(
        &mut self,
        wrapped_object_containers: BTreeMap<ObjectID, ObjectID>,
    ) {
        TemporaryStore::save_wrapped_object_containers(self, wrapped_object_containers)
    }

    /// Run the coin deny-list (v2) check for the given fund-receiving types and
    /// owners against the current epoch's deny-list state in the backing store.
    fn check_coin_deny_list(
        &self,
        receiving_funds_type_and_owners: BTreeMap<TypeTag, BTreeSet<SuiAddress>>,
    ) -> DenyListResult {
        let result = check_coin_deny_list_v2_during_execution(
            receiving_funds_type_and_owners,
            self.cur_epoch,
            self.store.as_object_store(),
        );
        // The denylist object is only loaded if there are regulated transfers.
        // And also if we already have it in the input there is no need to commit it again in the effects.
        if result.num_non_gas_coin_owners > 0
            && !self.input_objects.contains_key(&SUI_DENY_LIST_OBJECT_ID)
        {
            self.loaded_per_epoch_config_objects
                .write()
                .insert(SUI_DENY_LIST_OBJECT_ID);
        }
        result
    }

    fn record_generated_object_ids(&mut self, generated_ids: BTreeSet<ObjectID>) {
        TemporaryStore::save_generated_object_ids(self, generated_ids)
    }
}
1275
1276impl BackingPackageStore for TemporaryStore<'_> {
1277    fn get_package_object(&self, package_id: &ObjectID) -> SuiResult<Option<PackageObject>> {
1278        // We first check the objects in the temporary store because in non-production code path,
1279        // it is possible to read packages that are just written in the same transaction.
1280        // This can happen for example when we run the expensive conservation checks, where we may
1281        // look into the types of each written object in the output, and some of them need the
1282        // newly written packages for type checking.
1283        // In production path though, this should never happen.
1284        if let Some(obj) = self.execution_results.written_objects.get(package_id) {
1285            Ok(Some(PackageObject::new(obj.clone())))
1286        } else {
1287            self.store.get_package_object(package_id).inspect(|obj| {
1288                // Track object but leave unchanged
1289                if let Some(v) = obj
1290                    && !self
1291                        .runtime_packages_loaded_from_db
1292                        .read()
1293                        .contains_key(package_id)
1294                {
1295                    // TODO: Can this lock ever block execution?
1296                    // TODO: Another way to avoid the cost of maintaining this map is to not
1297                    // enable it in normal runs, and if a fork is detected, rerun it with a flag
1298                    // turned on and start populating this field.
1299                    self.runtime_packages_loaded_from_db
1300                        .write()
1301                        .insert(*package_id, v.clone());
1302                }
1303            })
1304        }
1305    }
1306}
1307
impl ParentSync for TemporaryStore<'_> {
    /// Deprecated parent-entry lookup; newer protocol versions never invoke
    /// this, so reaching it indicates a bug.
    fn get_latest_parent_entry_ref_deprecated(&self, _object_id: ObjectID) -> Option<ObjectRef> {
        unreachable!("Never called in newer protocol versions")
    }
}