Merge "UseV2 soft memory calculations" into main
diff --git a/statsd/src/FieldValue.cpp b/statsd/src/FieldValue.cpp
index 84fad92..92ed96d 100644
--- a/statsd/src/FieldValue.cpp
+++ b/statsd/src/FieldValue.cpp
@@ -570,6 +570,14 @@
     return totalSize;
 }
 
+size_t getFieldValuesSizeV2(const std::vector<FieldValue>& fieldValues) {
+    size_t totalSize = 0;
+    for (const FieldValue& fieldValue : fieldValues) {
+        totalSize += fieldValue.getSizeV2();
+    }
+    return totalSize;
+}
+
 bool shouldKeepSample(const FieldValue& sampleFieldValue, int shardOffset, int shardCount) {
     int hashValue = 0;
     switch (sampleFieldValue.mValue.type) {
diff --git a/statsd/src/FieldValue.h b/statsd/src/FieldValue.h
index 66b5475..22fa89b 100644
--- a/statsd/src/FieldValue.h
+++ b/statsd/src/FieldValue.h
@@ -448,6 +448,10 @@
         return mField.getSize() + mValue.getSize();
     }
 
+    size_t getSizeV2() const {
+        return mValue.getSize();
+    }
+
     Field mField;
     Value mValue;
     Annotations mAnnotations;
@@ -482,6 +486,9 @@
 // the size is computed at runtime using the actual contents stored in the FieldValue.
 size_t getSize(const std::vector<FieldValue>& fieldValues);
 
+// Same as getSize but does not compute the size of Field.
+size_t getFieldValuesSizeV2(const std::vector<FieldValue>& fieldValues);
+
 bool shouldKeepSample(const FieldValue& sampleFieldValue, int shardOffset, int shardCount);
 
 }  // namespace statsd
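
The V2 helpers above count only each FieldValue's Value payload, dropping the Field (path) bytes
that the original getSize includes. A minimal standalone sketch of the two accounting strategies,
using a hypothetical simplified struct rather than the real statsd FieldValue:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for statsd's FieldValue: an encoded field path plus a payload.
struct SimpleFieldValue {
    int32_t field;   // encoded field path
    int64_t value;   // payload (widest primitive, for the sketch)

    size_t getSize() const { return sizeof(field) + sizeof(value); }  // V1: path + payload
    size_t getSizeV2() const { return sizeof(value); }                // V2: payload only
};

size_t getFieldValuesSizeV2(const std::vector<SimpleFieldValue>& fieldValues) {
    size_t totalSize = 0;
    for (const SimpleFieldValue& fieldValue : fieldValues) {
        totalSize += fieldValue.getSizeV2();
    }
    return totalSize;
}
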
diff --git a/statsd/src/HashableDimensionKey.cpp b/statsd/src/HashableDimensionKey.cpp
index 837d9e9..03a4229 100644
--- a/statsd/src/HashableDimensionKey.cpp
+++ b/statsd/src/HashableDimensionKey.cpp
@@ -398,6 +398,21 @@
     return mStateValuesKey < that.getStateValuesKey();
 }
 
+size_t MetricDimensionKey::getSize(const bool usesNestedDimensions) const {
+    size_t dimensionKeySize = 0;
+    // Dimension/State values
+    if (usesNestedDimensions) {
+        // Assume nested dimensions add an additional atomTag plus one int32 per dimension field
+        dimensionKeySize += sizeof(int32_t);
+        dimensionKeySize += sizeof(int32_t) * getDimensionKeyInWhat().getValues().size();
+    }
+    dimensionKeySize += getFieldValuesSizeV2(getDimensionKeyInWhat().getValues());
+    // Each state value has an atomId and a group/value
+    dimensionKeySize += sizeof(int32_t) * getStateValuesKey().getValues().size();
+    dimensionKeySize += getFieldValuesSizeV2(getStateValuesKey().getValues());
+    return dimensionKeySize;
+}
+
 bool AtomDimensionKey::operator==(const AtomDimensionKey& that) const {
     return mAtomTag == that.getAtomTag() && mAtomFieldValues == that.getAtomFieldValues();
 };
diff --git a/statsd/src/HashableDimensionKey.h b/statsd/src/HashableDimensionKey.h
index 5753d98..4792e8d 100644
--- a/statsd/src/HashableDimensionKey.h
+++ b/statsd/src/HashableDimensionKey.h
@@ -135,6 +135,8 @@
 
     bool operator<(const MetricDimensionKey& that) const;
 
+    size_t getSize(const bool usesNestedDimensions) const;
+
 private:
     HashableDimensionKey mDimensionKeyInWhat;
     HashableDimensionKey mStateValuesKey;
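
For intuition, a worked example of the MetricDimensionKey::getSize arithmetic above, assuming
nested dimensions, three int32 dimension-in-what values, and one int32 state value (illustrative
numbers, not taken from the change):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
    const size_t numDimFields = 3;    // dimension-in-what values, int32 payloads assumed
    const size_t numStateValues = 1;  // state values, int32 payloads assumed

    size_t dimensionKeySize = 0;
    dimensionKeySize += sizeof(int32_t);                   // nested: extra atomTag
    dimensionKeySize += sizeof(int32_t) * numDimFields;    // nested: one field id per value
    dimensionKeySize += sizeof(int32_t) * numDimFields;    // V2 payloads of the dimension values
    dimensionKeySize += sizeof(int32_t) * numStateValues;  // atomId per state value
    dimensionKeySize += sizeof(int32_t) * numStateValues;  // V2 payloads of the state values
    std::printf("estimated key size: %zu bytes\n", dimensionKeySize);  // 4 + 12 + 12 + 4 + 4 = 36
    return 0;
}
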
diff --git a/statsd/src/StatsLogProcessor.cpp b/statsd/src/StatsLogProcessor.cpp
index 9fe282a..95882d5 100644
--- a/statsd/src/StatsLogProcessor.cpp
+++ b/statsd/src/StatsLogProcessor.cpp
@@ -1114,8 +1114,11 @@
                                                MetricsManager& metricsManager) {
     int64_t elapsedRealtimeNs = getElapsedRealtimeNs();
     auto lastCheckTime = mLastByteSizeTimes.find(key);
+    int64_t minCheckPeriodNs = metricsManager.useV2SoftMemoryCalculation()
+                                       ? StatsdStats::kMinByteSizeV2CheckPeriodNs
+                                       : StatsdStats::kMinByteSizeCheckPeriodNs;
     if (lastCheckTime != mLastByteSizeTimes.end()) {
-        if (elapsedRealtimeNs - lastCheckTime->second < StatsdStats::kMinByteSizeCheckPeriodNs) {
+        if (elapsedRealtimeNs - lastCheckTime->second < minCheckPeriodNs) {
             return;
         }
     }
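
The hunk above throttles how often a config's report byte size is recomputed, and configs on the
V2 calculation are checked less often. A minimal sketch of that selection; the constants mirror
the StatsdStats additions later in this change, while the free function here is hypothetical:

#include <cstdint>

constexpr int64_t NS_PER_SEC = 1000000000LL;
constexpr int64_t kMinByteSizeCheckPeriodNs = 1 * 60 * NS_PER_SEC;    // V1: at most every minute
constexpr int64_t kMinByteSizeV2CheckPeriodNs = 5 * 60 * NS_PER_SEC;  // V2: at most every 5 min

// Returns true if enough time has elapsed since the last byte-size check for this config.
bool shouldCheckByteSize(int64_t nowNs, int64_t lastCheckNs, bool usesV2SoftMemoryCalculation) {
    const int64_t minCheckPeriodNs = usesV2SoftMemoryCalculation ? kMinByteSizeV2CheckPeriodNs
                                                                 : kMinByteSizeCheckPeriodNs;
    return nowNs - lastCheckNs >= minCheckPeriodNs;
}
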
diff --git a/statsd/src/guardrail/StatsdStats.cpp b/statsd/src/guardrail/StatsdStats.cpp
index 8cd68dc..16354b7 100644
--- a/statsd/src/guardrail/StatsdStats.cpp
+++ b/statsd/src/guardrail/StatsdStats.cpp
@@ -137,6 +137,7 @@
 const int FIELD_ID_DB_DELETION_TOO_OLD = 35;
 const int FIELD_ID_DB_DELETION_CONFIG_REMOVED = 36;
 const int FIELD_ID_DB_DELETION_CONFIG_UPDATED = 37;
+const int FIELD_ID_CONFIG_METADATA_PROVIDER_PROMOTION_FAILED = 38;
 
 const int FIELD_ID_INVALID_CONFIG_REASON_ENUM = 1;
 const int FIELD_ID_INVALID_CONFIG_REASON_METRIC_ID = 2;
@@ -552,6 +553,16 @@
     it->second->db_deletion_config_updated++;
 }
 
+void StatsdStats::noteConfigMetadataProviderPromotionFailed(const ConfigKey& key) {
+    lock_guard<std::mutex> lock(mLock);
+    auto it = mConfigStats.find(key);
+    if (it == mConfigStats.end()) {
+        ALOGE("Config key %s not found!", key.ToString().c_str());
+        return;
+    }
+    it->second->config_metadata_provider_promote_failure++;
+}
+
 void StatsdStats::noteUidMapDropped(int deltas) {
     lock_guard<std::mutex> lock(mLock);
     mUidMapStats.dropped_changes += mUidMapStats.dropped_changes + deltas;
@@ -1095,6 +1106,7 @@
         config.second->db_deletion_too_old = 0;
         config.second->db_deletion_config_removed = 0;
         config.second->db_deletion_config_updated = 0;
+        config.second->config_metadata_provider_promote_failure = 0;
     }
     for (auto& pullStats : mPulledAtomStats) {
         pullStats.second.totalPull = 0;
@@ -1212,6 +1224,10 @@
                     configStats->db_deletion_too_old, configStats->db_deletion_config_removed,
                     configStats->db_deletion_config_updated);
         }
+        if (configStats->config_metadata_provider_promote_failure > 0) {
+            dprintf(out, "ConfigMetadataProviderPromotionFailure=%d",
+                    configStats->config_metadata_provider_promote_failure);
+        }
         dprintf(out, "\n");
         if (!configStats->is_valid) {
             dprintf(out, "\tinvalid config reason: %s\n",
@@ -1734,6 +1750,8 @@
                              configStats.db_deletion_config_removed, proto);
     writeNonZeroStatToStream(FIELD_TYPE_INT32 | FIELD_ID_DB_DELETION_CONFIG_UPDATED,
                              configStats.db_deletion_config_updated, proto);
+    writeNonZeroStatToStream(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_METADATA_PROVIDER_PROMOTION_FAILED,
+                             configStats.config_metadata_provider_promote_failure, proto);
     for (int64_t latency : configStats.total_flush_latency_ns) {
         proto->write(FIELD_TYPE_INT64 | FIELD_ID_CONFIG_STATS_RESTRICTED_CONFIG_FLUSH_LATENCY |
                              FIELD_COUNT_REPEATED,
diff --git a/statsd/src/guardrail/StatsdStats.h b/statsd/src/guardrail/StatsdStats.h
index 6f20260..03266ee 100644
--- a/statsd/src/guardrail/StatsdStats.h
+++ b/statsd/src/guardrail/StatsdStats.h
@@ -91,6 +91,8 @@
     int32_t db_deletion_too_old = 0;
     int32_t db_deletion_config_removed = 0;
     int32_t db_deletion_config_updated = 0;
+    // Stores the number of ConfigMetadataProvider promotion failures
+    int32_t config_metadata_provider_promote_failure = 0;
 
     // Stores reasons for why config is valid or not
     std::optional<InvalidConfigReason> reason;
@@ -234,7 +236,11 @@
     static const int64_t kMinBroadcastPeriodNs = 60 * NS_PER_SEC;
 
     /* Min period between two checks of byte size per config key in nanoseconds. */
-    static const int64_t kMinByteSizeCheckPeriodNs = 60 * NS_PER_SEC;
+    static const int64_t kMinByteSizeCheckPeriodNs = 1 * 60 * NS_PER_SEC;
+
+    // Min period between two checks of byte size per config key in nanoseconds for V2 memory
+    // calculations.
+    static const int64_t kMinByteSizeV2CheckPeriodNs = 5 * 60 * NS_PER_SEC;
 
     /* Min period between two checks of restricted metrics TTLs. */
     static const int64_t kMinTtlCheckPeriodNs = 60 * 60 * NS_PER_SEC;
@@ -388,6 +394,11 @@
     void noteDbDeletionConfigUpdated(const ConfigKey& key);
 
     /**
+     * Reports that promoting the ConfigMetadataProvider weak pointer failed.
+     */
+    void noteConfigMetadataProviderPromotionFailed(const ConfigKey& key);
+
+    /**
      * Report the size of output tuple of a condition.
      *
      * Note: only report when the condition has an output dimension, and the tuple
@@ -1036,6 +1047,7 @@
     FRIEND_TEST(StatsdStatsTest, TestAtomLoggedAndDroppedStats);
     FRIEND_TEST(StatsdStatsTest, TestAtomMetricsStats);
     FRIEND_TEST(StatsdStatsTest, TestAtomSkippedStats);
+    FRIEND_TEST(StatsdStatsTest, TestConfigMetadataProviderPromotionFailed);
     FRIEND_TEST(StatsdStatsTest, TestConfigRemove);
     FRIEND_TEST(StatsdStatsTest, TestHasHitDimensionGuardrail);
     FRIEND_TEST(StatsdStatsTest, TestInvalidConfigAdd);
diff --git a/statsd/src/metrics/CountMetricProducer.cpp b/statsd/src/metrics/CountMetricProducer.cpp
index fb5f488..32be42e 100644
--- a/statsd/src/metrics/CountMetricProducer.cpp
+++ b/statsd/src/metrics/CountMetricProducer.cpp
@@ -216,6 +216,7 @@
 
 void CountMetricProducer::clearPastBucketsLocked(const int64_t dumpTimeNs) {
     mPastBuckets.clear();
+    mTotalDataSize = 0;
 }
 
 void CountMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
@@ -318,6 +319,7 @@
     if (erase_data) {
         mPastBuckets.clear();
         mDimensionGuardrailHit = false;
+        mTotalDataSize = 0;
     }
 }
 
@@ -325,6 +327,7 @@
     flushIfNeededLocked(dropTimeNs);
     StatsdStats::getInstance().noteBucketDropped(mMetricId);
     mPastBuckets.clear();
+    mTotalDataSize = 0;
 }
 
 void CountMetricProducer::onConditionChangedLocked(const bool conditionMet,
@@ -459,7 +462,10 @@
         if (countPassesThreshold(counter.second)) {
             info.mCount = counter.second;
             auto& bucketList = mPastBuckets[counter.first];
+            const bool isFirstBucket = bucketList.empty();
             bucketList.push_back(info);
+            mTotalDataSize += computeBucketSizeLocked(eventTimeNs >= fullBucketEndTimeNs,
+                                                      counter.first, isFirstBucket);
             VLOG("metric %lld, dump key value: %s -> %lld", (long long)mMetricId,
                  counter.first.toString().c_str(), (long long)counter.second);
         }
@@ -505,6 +511,11 @@
 // greater than actual data size as it contains each dimension of
 // CountMetricData is  duplicated.
 size_t CountMetricProducer::byteSizeLocked() const {
+    sp<ConfigMetadataProvider> configMetadataProvider = getConfigMetadataProvider();
+    if (configMetadataProvider != nullptr && configMetadataProvider->useV2SoftMemoryCalculation()) {
+        return computeOverheadSizeLocked(!mPastBuckets.empty(), mDimensionGuardrailHit) +
+               mTotalDataSize;
+    }
     size_t totalSize = 0;
     for (const auto& pair : mPastBuckets) {
         totalSize += pair.second.size() * kBucketSize;
@@ -512,6 +523,24 @@
     return totalSize;
 }
 
+// Estimate for the size of a CountBucket.
+size_t CountMetricProducer::computeBucketSizeLocked(const bool isFullBucket,
+                                                    const MetricDimensionKey& dimKey,
+                                                    const bool isFirstBucket) const {
+    size_t bucketSize =
+            MetricProducer::computeBucketSizeLocked(isFullBucket, dimKey, isFirstBucket);
+
+    // Count Value
+    bucketSize += sizeof(int32_t);
+
+    // ConditionTrueNanos
+    if (mConditionTrackerIndex >= 0 && mSlicedStateAtoms.empty() && !mConditionSliced) {
+        bucketSize += sizeof(int64_t);
+    }
+
+    return bucketSize;
+}
+
 void CountMetricProducer::onActiveStateChangedLocked(const int64_t eventTimeNs,
                                                      const bool isActive) {
     MetricProducer::onActiveStateChangedLocked(eventTimeNs, isActive);
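
Under V2, CountMetricProducer no longer walks mPastBuckets on every byteSizeLocked call; a running
total is bumped when a bucket is appended and reset whenever past buckets are cleared or dropped.
A simplified sketch of that bookkeeping pattern, using hypothetical types rather than the statsd
classes:

#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

struct Bucket {
    int64_t count;
};

class CountSizeTracker {
public:
    // Called when a finished bucket is appended for a dimension key.
    void addBucket(int32_t dimKey, const Bucket& bucket, size_t dimKeySize, bool isFullBucket) {
        std::vector<Bucket>& bucketList = mPastBuckets[dimKey];
        const bool isFirstBucket = bucketList.empty();
        bucketList.push_back(bucket);
        // Bucket number or start/end timestamps, plus the count value.
        mTotalDataSize += (isFullBucket ? sizeof(int32_t) : 2 * sizeof(int64_t)) + sizeof(int32_t);
        // The dimension key is reported once per key, so charge it only for the first bucket.
        if (isFirstBucket) mTotalDataSize += dimKeySize;
    }

    // Mirrors clearPastBucketsLocked()/dropDataLocked(): the running total follows the data.
    void clearPastBuckets() {
        mPastBuckets.clear();
        mTotalDataSize = 0;
    }

    size_t byteSize(size_t overheadSize) const { return overheadSize + mTotalDataSize; }

private:
    std::unordered_map<int32_t, std::vector<Bucket>> mPastBuckets;
    size_t mTotalDataSize = 0;
};
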
diff --git a/statsd/src/metrics/CountMetricProducer.h b/statsd/src/metrics/CountMetricProducer.h
index 757dad2..d34181c 100644
--- a/statsd/src/metrics/CountMetricProducer.h
+++ b/statsd/src/metrics/CountMetricProducer.h
@@ -101,6 +101,9 @@
 
     void onActiveStateChangedLocked(const int64_t eventTimeNs, const bool isActive) override;
 
+    size_t computeBucketSizeLocked(const bool isFullBucket, const MetricDimensionKey& dimKey,
+                                   const bool isFirstBucket) const override;
+
     optional<InvalidConfigReason> onConfigUpdatedLocked(
             const StatsdConfig& config, int configIndex, int metricIndex,
             const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
diff --git a/statsd/src/metrics/DurationMetricProducer.cpp b/statsd/src/metrics/DurationMetricProducer.cpp
index e18f83c..b07f2e3 100644
--- a/statsd/src/metrics/DurationMetricProducer.cpp
+++ b/statsd/src/metrics/DurationMetricProducer.cpp
@@ -519,14 +519,15 @@
 
     protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ID, (long long)mMetricId);
     protoOutput->write(FIELD_TYPE_BOOL | FIELD_ID_IS_ACTIVE, isActiveLocked());
-    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ESTIMATED_MEMORY_BYTES,
-                       (long long)byteSizeLocked());
 
     if (mPastBuckets.empty()) {
         VLOG(" Duration metric, empty return");
         return;
     }
 
+    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ESTIMATED_MEMORY_BYTES,
+                       (long long)byteSizeLocked());
+
     if (StatsdStats::getInstance().hasHitDimensionGuardrail(mMetricId)) {
         protoOutput->write(FIELD_TYPE_BOOL | FIELD_ID_DIMENSION_GUARDRAIL_HIT, true);
     }
@@ -846,8 +847,42 @@
                      eventTimeNs, values);
 }
 
+// Estimate for the size of a DurationBucket.
+size_t DurationMetricProducer::computeBucketSizeLocked(const bool isFullBucket,
+                                                       const MetricDimensionKey& dimKey,
+                                                       const bool isFirstBucket) const {
+    size_t bucketSize =
+            MetricProducer::computeBucketSizeLocked(isFullBucket, dimKey, isFirstBucket);
+
+    // Duration Value
+    bucketSize += sizeof(int64_t);
+
+    // ConditionTrueNanos
+    if (mConditionTrackerIndex >= 0 && mSlicedStateAtoms.empty() && !mConditionSliced) {
+        bucketSize += sizeof(int64_t);
+    }
+
+    return bucketSize;
+}
+
 size_t DurationMetricProducer::byteSizeLocked() const {
     size_t totalSize = 0;
+    sp<ConfigMetadataProvider> configMetadataProvider = getConfigMetadataProvider();
+    if (configMetadataProvider != nullptr && configMetadataProvider->useV2SoftMemoryCalculation()) {
+        bool hasHitDimensionGuardrail =
+                StatsdStats::getInstance().hasHitDimensionGuardrail(mMetricId);
+        totalSize += computeOverheadSizeLocked(!mPastBuckets.empty(), hasHitDimensionGuardrail);
+        for (const auto& pair : mPastBuckets) {
+            bool isFirstBucket = true;
+            for (const auto& bucket : pair.second) {
+                bool isFullBucket = bucket.mBucketEndNs - bucket.mBucketStartNs >= mBucketSizeNs;
+                totalSize +=
+                        computeBucketSizeLocked(isFullBucket, /*dimKey=*/pair.first, isFirstBucket);
+                isFirstBucket = false;
+            }
+        }
+        return totalSize;
+    }
     for (const auto& pair : mPastBuckets) {
         totalSize += pair.second.size() * kBucketSize;
     }
diff --git a/statsd/src/metrics/DurationMetricProducer.h b/statsd/src/metrics/DurationMetricProducer.h
index b7bc29a..c5403ff 100644
--- a/statsd/src/metrics/DurationMetricProducer.h
+++ b/statsd/src/metrics/DurationMetricProducer.h
@@ -139,6 +139,9 @@
     void addAnomalyTrackerLocked(sp<AnomalyTracker>& anomalyTracker,
                                  const UpdateStatus& updateStatus, int64_t updateTimeNs);
 
+    size_t computeBucketSizeLocked(const bool isFullBucket, const MetricDimensionKey& dimKey,
+                                   const bool isFirstBucket) const override;
+
     const DurationMetric_AggregationType mAggregationType;
 
     // Index of the SimpleAtomMatcher which defines the start.
diff --git a/statsd/src/metrics/EventMetricProducer.cpp b/statsd/src/metrics/EventMetricProducer.cpp
index 648e21c..540e3d0 100644
--- a/statsd/src/metrics/EventMetricProducer.cpp
+++ b/statsd/src/metrics/EventMetricProducer.cpp
@@ -83,7 +83,6 @@
         mConditionSliced = true;
     }
 
-    mTotalSize = 0;
     VLOG("metric %lld created. bucket size %lld start_time: %lld", (long long)mMetricId,
          (long long)mBucketSizeNs, (long long)mTimeBaseNs);
 }
@@ -139,9 +138,9 @@
 
 void EventMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
     mAggregatedAtoms.clear();
-    mTotalSize = 0;
     mDataCorruptedDueToSocketLoss = false;
     mDataCorruptedDueToQueueOverflow = false;
+    mTotalDataSize = 0;
     StatsdStats::getInstance().noteBucketDropped(mMetricId);
 }
 
@@ -168,9 +167,9 @@
 
 void EventMetricProducer::clearPastBucketsLocked(const int64_t dumpTimeNs) {
     mAggregatedAtoms.clear();
-    mTotalSize = 0;
     mDataCorruptedDueToSocketLoss = false;
     mDataCorruptedDueToQueueOverflow = false;
+    mTotalDataSize = 0;
 }
 
 void EventMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
@@ -181,12 +180,13 @@
                                              ProtoOutputStream* protoOutput) {
     protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ID, (long long)mMetricId);
     protoOutput->write(FIELD_TYPE_BOOL | FIELD_ID_IS_ACTIVE, isActiveLocked());
-    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ESTIMATED_MEMORY_BYTES,
-                       (long long)byteSizeLocked());
     // Data corrupted reason
     writeDataCorruptedReasons(*protoOutput, FIELD_ID_DATA_CORRUPTED_REASON,
                               mDataCorruptedDueToQueueOverflow, mDataCorruptedDueToSocketLoss);
-
+    if (!mAggregatedAtoms.empty()) {
+        protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ESTIMATED_MEMORY_BYTES,
+                           (long long)byteSizeLocked());
+    }
     uint64_t protoToken = protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_ID_EVENT_METRICS);
     for (const auto& [atomDimensionKey, elapsedTimestampsNs] : mAggregatedAtoms) {
         uint64_t wrapperToken =
@@ -210,9 +210,9 @@
     protoOutput->end(protoToken);
     if (erase_data) {
         mAggregatedAtoms.clear();
-        mTotalSize = 0;
         mDataCorruptedDueToSocketLoss = false;
         mDataCorruptedDueToQueueOverflow = false;
+        mTotalDataSize = 0;
     }
 }
 
@@ -239,14 +239,24 @@
 
     std::vector<int64_t>& aggregatedTimestampsNs = mAggregatedAtoms[key];
     if (aggregatedTimestampsNs.empty()) {
-        mTotalSize += getSize(key.getAtomFieldValues().getValues());
+        sp<ConfigMetadataProvider> provider = getConfigMetadataProvider();
+        if (provider != nullptr && provider->useV2SoftMemoryCalculation()) {
+            mTotalDataSize += getFieldValuesSizeV2(key.getAtomFieldValues().getValues());
+        } else {
+            mTotalDataSize += getSize(key.getAtomFieldValues().getValues());
+        }
     }
     aggregatedTimestampsNs.push_back(elapsedTimeNs);
-    mTotalSize += sizeof(int64_t); // Add the size of the event timestamp
+    mTotalDataSize += sizeof(int64_t);  // Add the size of the event timestamp
 }
 
 size_t EventMetricProducer::byteSizeLocked() const {
-    return mTotalSize;
+    sp<ConfigMetadataProvider> provider = getConfigMetadataProvider();
+    if (provider != nullptr && provider->useV2SoftMemoryCalculation()) {
+        return mTotalDataSize +
+               computeOverheadSizeLocked(/*hasPastBuckets=*/false, /*dimensionGuardrailHit=*/false);
+    }
+    return mTotalDataSize;
 }
 
 }  // namespace statsd
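
The event-metric accounting above charges the V2 field-value size once per distinct aggregated
atom key, plus one 8-byte timestamp per occurrence. A standalone sketch of that rule with
hypothetical types:

#include <cstddef>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

class EventSizeTracker {
public:
    // fieldValuesSizeV2 is the precomputed V2 size of the atom's field values.
    void onAggregatedAtom(const std::string& atomKey, size_t fieldValuesSizeV2,
                          int64_t elapsedTimeNs) {
        std::vector<int64_t>& timestampsNs = mAggregatedAtoms[atomKey];
        if (timestampsNs.empty()) {
            mTotalDataSize += fieldValuesSizeV2;  // charged once per distinct atom key
        }
        timestampsNs.push_back(elapsedTimeNs);
        mTotalDataSize += sizeof(int64_t);  // one timestamp per occurrence
    }

    size_t totalDataSize() const { return mTotalDataSize; }

private:
    std::unordered_map<std::string, std::vector<int64_t>> mAggregatedAtoms;
    size_t mTotalDataSize = 0;
};
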
diff --git a/statsd/src/metrics/EventMetricProducer.h b/statsd/src/metrics/EventMetricProducer.h
index aed4358..d64552b 100644
--- a/statsd/src/metrics/EventMetricProducer.h
+++ b/statsd/src/metrics/EventMetricProducer.h
@@ -51,9 +51,6 @@
         return METRIC_TYPE_EVENT;
     }
 
-protected:
-    size_t mTotalSize;
-
 private:
     void onMatchedLogEventInternalLocked(
             const size_t matcherIndex, const MetricDimensionKey& eventKey,
diff --git a/statsd/src/metrics/GaugeMetricProducer.cpp b/statsd/src/metrics/GaugeMetricProducer.cpp
index f82f04e..979b344 100644
--- a/statsd/src/metrics/GaugeMetricProducer.cpp
+++ b/statsd/src/metrics/GaugeMetricProducer.cpp
@@ -244,6 +244,7 @@
     flushIfNeededLocked(dumpTimeNs);
     mPastBuckets.clear();
     mSkippedBuckets.clear();
+    mTotalDataSize = 0;
 }
 
 void GaugeMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
@@ -261,13 +262,14 @@
 
     protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ID, (long long)mMetricId);
     protoOutput->write(FIELD_TYPE_BOOL | FIELD_ID_IS_ACTIVE, isActiveLocked());
-    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ESTIMATED_MEMORY_BYTES,
-                       (long long)byteSizeLocked());
 
     if (mPastBuckets.empty() && mSkippedBuckets.empty()) {
         return;
     }
 
+    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ESTIMATED_MEMORY_BYTES,
+                       (long long)byteSizeLocked());
+
     if (mDimensionGuardrailHit) {
         protoOutput->write(FIELD_TYPE_BOOL | FIELD_ID_DIMENSION_GUARDRAIL_HIT,
                            mDimensionGuardrailHit);
@@ -373,6 +375,7 @@
         mPastBuckets.clear();
         mSkippedBuckets.clear();
         mDimensionGuardrailHit = false;
+        mTotalDataSize = 0;
     }
 }
 
@@ -626,6 +629,7 @@
     flushIfNeededLocked(dropTimeNs);
     StatsdStats::getInstance().noteBucketDropped(mMetricId);
     mPastBuckets.clear();
+    mTotalDataSize = 0;
 }
 
 // When a new matched event comes in, we check if event falls into the current
@@ -673,7 +677,11 @@
                 elapsedTimestampsNs.push_back(atom.mElapsedTimestampNs);
             }
             auto& bucketList = mPastBuckets[slice.first];
+            const bool isFirstBucket = bucketList.empty();
             bucketList.push_back(info);
+            mTotalDataSize += computeGaugeBucketSizeLocked(eventTimeNs >= fullBucketEndTimeNs,
+                                                           /*dimKey=*/slice.first, isFirstBucket,
+                                                           info.mAggregatedAtoms);
             VLOG("Gauge gauge metric %lld, dump key value: %s", (long long)mMetricId,
                  slice.first.toString().c_str());
         }
@@ -685,6 +693,7 @@
                     buildDropEvent(eventTimeNs, BucketDropReason::BUCKET_TOO_SMALL));
         }
         mSkippedBuckets.emplace_back(mCurrentSkippedBucket);
+        mTotalDataSize += computeSkippedBucketSizeLocked(mCurrentSkippedBucket);
     }
 
     // If we have anomaly trackers, we need to update the partial bucket values.
@@ -708,7 +717,29 @@
     mHasHitGuardrail = false;
 }
 
+// Estimate for the size of a GaugeBucket.
+size_t GaugeMetricProducer::computeGaugeBucketSizeLocked(
+        const bool isFullBucket, const MetricDimensionKey& dimKey, const bool isFirstBucket,
+        const std::unordered_map<AtomDimensionKey, std::vector<int64_t>>& aggregatedAtoms) const {
+    size_t bucketSize =
+            MetricProducer::computeBucketSizeLocked(isFullBucket, dimKey, isFirstBucket);
+
+    // Gauge Atoms and timestamps
+    for (const auto& pair : aggregatedAtoms) {
+        bucketSize += getFieldValuesSizeV2(pair.first.getAtomFieldValues().getValues());
+        bucketSize += sizeof(int64_t) * pair.second.size();
+    }
+
+    return bucketSize;
+}
+
 size_t GaugeMetricProducer::byteSizeLocked() const {
+    sp<ConfigMetadataProvider> configMetadataProvider = getConfigMetadataProvider();
+    if (configMetadataProvider != nullptr && configMetadataProvider->useV2SoftMemoryCalculation()) {
+        return computeOverheadSizeLocked(!mPastBuckets.empty() || !mSkippedBuckets.empty(),
+                                         mDimensionGuardrailHit) +
+               mTotalDataSize;
+    }
     size_t totalSize = 0;
     for (const auto& pair : mPastBuckets) {
         for (const auto& bucket : pair.second) {
diff --git a/statsd/src/metrics/GaugeMetricProducer.h b/statsd/src/metrics/GaugeMetricProducer.h
index 18e0b43..daff08c 100644
--- a/statsd/src/metrics/GaugeMetricProducer.h
+++ b/statsd/src/metrics/GaugeMetricProducer.h
@@ -149,6 +149,11 @@
     // Only call if mCondition == ConditionState::kTrue && metric is active.
     void pullAndMatchEventsLocked(const int64_t timestampNs);
 
+    size_t computeGaugeBucketSizeLocked(
+            const bool isFullBucket, const MetricDimensionKey& dimKey, const bool isFirstBucket,
+            const std::unordered_map<AtomDimensionKey, std::vector<int64_t>>& aggregatedAtoms)
+            const;
+
     optional<InvalidConfigReason> onConfigUpdatedLocked(
             const StatsdConfig& config, int configIndex, int metricIndex,
             const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
diff --git a/statsd/src/metrics/KllMetricProducer.cpp b/statsd/src/metrics/KllMetricProducer.cpp
index 26aafa4..cc59e3c 100644
--- a/statsd/src/metrics/KllMetricProducer.cpp
+++ b/statsd/src/metrics/KllMetricProducer.cpp
@@ -153,7 +153,26 @@
     return bucket;
 }
 
+// Estimate for the size of an aggregated KLL value.
+size_t KllMetricProducer::getAggregatedValueSize(const std::unique_ptr<KllQuantile>& kll) const {
+    size_t valueSize = 0;
+    // Index
+    valueSize += sizeof(int32_t);
+
+    // Value
+    valueSize += kll->SerializeToProto().ByteSizeLong();
+
+    return valueSize;
+}
+
 size_t KllMetricProducer::byteSizeLocked() const {
+    sp<ConfigMetadataProvider> configMetadataProvider = getConfigMetadataProvider();
+    if (configMetadataProvider != nullptr && configMetadataProvider->useV2SoftMemoryCalculation()) {
+        bool dimensionGuardrailHit = StatsdStats::getInstance().hasHitDimensionGuardrail(mMetricId);
+        return computeOverheadSizeLocked(!mPastBuckets.empty() || !mSkippedBuckets.empty(),
+                                         dimensionGuardrailHit) +
+               mTotalDataSize;
+    }
     size_t totalSize = 0;
     for (const auto& [_, buckets] : mPastBuckets) {
         totalSize += buckets.size() * kBucketSize;
diff --git a/statsd/src/metrics/KllMetricProducer.h b/statsd/src/metrics/KllMetricProducer.h
index 16c1e2a..dd5dcbd 100644
--- a/statsd/src/metrics/KllMetricProducer.h
+++ b/statsd/src/metrics/KllMetricProducer.h
@@ -103,6 +103,8 @@
                                          const int sampleSize,
                                          ProtoOutputStream* const protoOutput) const override;
 
+    size_t getAggregatedValueSize(const std::unique_ptr<KllQuantile>& kll) const override;
+
     bool aggregateFields(const int64_t eventTimeNs, const MetricDimensionKey& eventKey,
                          const LogEvent& event, std::vector<Interval>& intervals,
                          Empty& empty) override;
diff --git a/statsd/src/metrics/MetricProducer.cpp b/statsd/src/metrics/MetricProducer.cpp
index ad2c8cc..ac80a20 100644
--- a/statsd/src/metrics/MetricProducer.cpp
+++ b/statsd/src/metrics/MetricProducer.cpp
@@ -391,6 +391,69 @@
                             mShardCount);
 }
 
+sp<ConfigMetadataProvider> MetricProducer::getConfigMetadataProvider() const {
+    sp<ConfigMetadataProvider> provider = mConfigMetadataProvider.promote();
+    if (provider == nullptr) {
+        ALOGE("Could not promote ConfigMetadataProvider");
+        StatsdStats::getInstance().noteConfigMetadataProviderPromotionFailed(mConfigKey);
+    }
+    return provider;
+}
+
+size_t MetricProducer::computeBucketSizeLocked(const bool isFullBucket,
+                                               const MetricDimensionKey& dimKey,
+                                               const bool isFirstBucket) const {
+    size_t bucketSize = 0;
+
+    // Bucket number (full bucket) or start/end timestamps (partial bucket)
+    bucketSize += isFullBucket ? sizeof(int32_t) : 2 * sizeof(int64_t);
+
+    // Each dimension / state key can have multiple buckets. Add the size only for the first bucket.
+    if (isFirstBucket) {
+        bucketSize += dimKey.getSize(mShouldUseNestedDimensions);
+    }
+
+    return bucketSize;
+}
+
+size_t MetricProducer::computeOverheadSizeLocked(const bool hasPastBuckets,
+                                                 const bool dimensionGuardrailHit) const {
+    size_t overheadSize = 0;
+
+    // MetricId + isActive
+    overheadSize += sizeof(int64_t) + sizeof(bool);
+
+    if (hasPastBuckets) {
+        if (dimensionGuardrailHit) {
+            overheadSize += sizeof(int32_t);
+        }
+
+        // estimated_memory_bytes
+        overheadSize += sizeof(int32_t);
+        // mTimeBase and mBucketSizeNs
+        overheadSize += 2 * sizeof(int64_t);
+
+        if (!mShouldUseNestedDimensions) {
+            // Assume flattened dimensions add an atomTag plus one int32 per dimension field
+            overheadSize += sizeof(int32_t);
+            overheadSize += sizeof(int32_t) * mDimensionsInWhat.size();
+        }
+    }
+    return overheadSize;
+}
+
+size_t MetricProducer::computeSkippedBucketSizeLocked(const SkippedBucket& skippedBucket) const {
+    size_t skippedBucketSize = 0;
+
+    // Bucket Start, Bucket End
+    skippedBucketSize += 2 * sizeof(int64_t);
+
+    // DropType, Drop Time
+    skippedBucketSize += (sizeof(int32_t) + sizeof(int64_t)) * skippedBucket.dropEvents.size();
+
+    return skippedBucketSize;
+}
+
 }  // namespace statsd
 }  // namespace os
 }  // namespace android
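
Taken together, the shared estimators above split a report's footprint into per-metric overhead,
per-bucket data, and skipped-bucket records. A hypothetical worked example of the arithmetic
(illustrative inputs, not real report data):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
    // computeBucketSizeLocked: a partial (not full) bucket that is the first one stored for its
    // key; the dimension/state key is estimated at 36 bytes.
    const size_t dimKeySize = 36;
    size_t bucketSize = 2 * sizeof(int64_t)  // start/end timestamps (partial bucket)
                        + dimKeySize;        // key charged on the first bucket only

    // computeOverheadSizeLocked: past buckets exist, guardrail not hit, flattened (non-nested)
    // dimensions with 2 dimension fields.
    size_t overheadSize = sizeof(int64_t) + sizeof(bool)  // metricId + isActive
                          + sizeof(int32_t)               // estimated_memory_bytes
                          + 2 * sizeof(int64_t)           // mTimeBase + mBucketSizeNs
                          + sizeof(int32_t)               // atomTag for flattened dimensions
                          + 2 * sizeof(int32_t);          // one int32 per dimension field

    // computeSkippedBucketSizeLocked: one skipped bucket carrying a single drop event.
    size_t skippedSize = 2 * sizeof(int64_t)                         // bucket start/end
                         + 1 * (sizeof(int32_t) + sizeof(int64_t));  // drop type + drop time

    std::printf("bucket=%zu overhead=%zu skipped=%zu\n",  // 52, 41, 28 on a typical LP64 build
                bucketSize, overheadSize, skippedSize);
    return 0;
}
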
diff --git a/statsd/src/metrics/MetricProducer.h b/statsd/src/metrics/MetricProducer.h
index d134425..cb97ae5 100644
--- a/statsd/src/metrics/MetricProducer.h
+++ b/statsd/src/metrics/MetricProducer.h
@@ -457,6 +457,15 @@
     void activateLocked(int activationTrackerIndex, int64_t elapsedTimestampNs);
     void cancelEventActivationLocked(int deactivationTrackerIndex);
 
+    // Computes the size of a newly added bucket to this metric, taking into account any new
+    // dimensions that are introduced if necessary.
+    virtual size_t computeBucketSizeLocked(const bool isFullBucket,
+                                           const MetricDimensionKey& dimKey,
+                                           const bool isFirstBucket) const;
+    size_t computeOverheadSizeLocked(const bool hasPastBuckets,
+                                     const bool dimensionGuardrailHit) const;
+    size_t computeSkippedBucketSizeLocked(const SkippedBucket& skippedBucket) const;
+
     bool evaluateActiveStateLocked(int64_t elapsedTimestampNs);
 
     virtual void onActiveStateChangedLocked(const int64_t eventTimeNs, const bool isActive) {
@@ -588,14 +597,14 @@
 
     int mShardCount;
 
-    inline wp<ConfigMetadataProvider> getConfigMetadataProvider() const {
-        return mConfigMetadataProvider;
-    }
+    sp<ConfigMetadataProvider> getConfigMetadataProvider() const;
 
     wp<ConfigMetadataProvider> mConfigMetadataProvider;
     bool mDataCorruptedDueToSocketLoss = false;
     bool mDataCorruptedDueToQueueOverflow = false;
 
+    size_t mTotalDataSize = 0;
+
     FRIEND_TEST(CountMetricE2eTest, TestSlicedState);
     FRIEND_TEST(CountMetricE2eTest, TestSlicedStateWithMap);
     FRIEND_TEST(CountMetricE2eTest, TestMultipleSlicedStates);
diff --git a/statsd/src/metrics/NumericValueMetricProducer.cpp b/statsd/src/metrics/NumericValueMetricProducer.cpp
index 53a1901..ed5032b 100644
--- a/statsd/src/metrics/NumericValueMetricProducer.cpp
+++ b/statsd/src/metrics/NumericValueMetricProducer.cpp
@@ -630,7 +630,30 @@
     }
 }
 
+// Estimate for the size of NumericValues.
+size_t NumericValueMetricProducer::getAggregatedValueSize(const Value& value) const {
+    size_t valueSize = 0;
+    // Index
+    valueSize += sizeof(int32_t);
+
+    // Value
+    valueSize += value.getSize();
+
+    // Sample Size
+    if (mIncludeSampleSize) {
+        valueSize += sizeof(int32_t);
+    }
+    return valueSize;
+}
+
 size_t NumericValueMetricProducer::byteSizeLocked() const {
+    sp<ConfigMetadataProvider> configMetadataProvider = getConfigMetadataProvider();
+    if (configMetadataProvider != nullptr && configMetadataProvider->useV2SoftMemoryCalculation()) {
+        bool dimensionGuardrailHit = StatsdStats::getInstance().hasHitDimensionGuardrail(mMetricId);
+        return computeOverheadSizeLocked(!mPastBuckets.empty() || !mSkippedBuckets.empty(),
+                                         dimensionGuardrailHit) +
+               mTotalDataSize;
+    }
     size_t totalSize = 0;
     for (const auto& [_, buckets] : mPastBuckets) {
         totalSize += buckets.size() * kBucketSize;
diff --git a/statsd/src/metrics/NumericValueMetricProducer.h b/statsd/src/metrics/NumericValueMetricProducer.h
index 103f404..8309aef 100644
--- a/statsd/src/metrics/NumericValueMetricProducer.h
+++ b/statsd/src/metrics/NumericValueMetricProducer.h
@@ -150,6 +150,8 @@
         return mAggregationTypes.size() == 1 ? mAggregationTypes[0] : mAggregationTypes[index];
     }
 
+    size_t getAggregatedValueSize(const Value& value) const override;
+
     const bool mUseAbsoluteValueOnReset;
 
     const std::vector<ValueMetric::AggregationType> mAggregationTypes;
diff --git a/statsd/src/metrics/RestrictedEventMetricProducer.cpp b/statsd/src/metrics/RestrictedEventMetricProducer.cpp
index 83454fb..77682dd 100644
--- a/statsd/src/metrics/RestrictedEventMetricProducer.cpp
+++ b/statsd/src/metrics/RestrictedEventMetricProducer.cpp
@@ -58,11 +58,11 @@
         StatsdStats::getInstance().noteRestrictedMetricCategoryChanged(mConfigKey, mMetricId);
         deleteMetricTable();
         mLogEvents.clear();
-        mTotalSize = 0;
+        mTotalDataSize = 0;
     }
     mRestrictedDataCategory = event.getRestrictionCategory();
     mLogEvents.push_back(event);
-    mTotalSize += getSize(event.getValues()) + sizeof(event);
+    mTotalDataSize += getSize(event.getValues()) + sizeof(event);
 }
 
 void RestrictedEventMetricProducer::onDumpReportLocked(
@@ -94,7 +94,7 @@
 
 void RestrictedEventMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
     mLogEvents.clear();
-    mTotalSize = 0;
+    mTotalDataSize = 0;
     StatsdStats::getInstance().noteBucketDropped(mMetricId);
 }
 
@@ -130,7 +130,7 @@
                 mConfigKey, mMetricId, getElapsedRealtimeNs() - flushStartNs);
     }
     mLogEvents.clear();
-    mTotalSize = 0;
+    mTotalDataSize = 0;
 }
 
 bool RestrictedEventMetricProducer::writeMetricMetadataToProto(
diff --git a/statsd/src/metrics/ValueMetricProducer.cpp b/statsd/src/metrics/ValueMetricProducer.cpp
index f4620dd..b1f0a33 100644
--- a/statsd/src/metrics/ValueMetricProducer.cpp
+++ b/statsd/src/metrics/ValueMetricProducer.cpp
@@ -226,6 +226,31 @@
 }
 
 template <typename AggregatedValue, typename DimExtras>
+size_t ValueMetricProducer<AggregatedValue, DimExtras>::computeValueBucketSizeLocked(
+        const bool isFullBucket, const MetricDimensionKey& dimKey, const bool isFirstBucket,
+        const PastBucket<AggregatedValue>& bucket) const {
+    size_t bucketSize =
+            MetricProducer::computeBucketSizeLocked(isFullBucket, dimKey, isFirstBucket);
+
+    for (const auto& value : bucket.aggregates) {
+        bucketSize += getAggregatedValueSize(value);
+    }
+
+    // ConditionTrueNanos
+    if (mConditionTrackerIndex >= 0 || !mSlicedStateAtoms.empty()) {
+        bucketSize += sizeof(int64_t);
+    }
+
+    // ConditionCorrectionNanos
+    if (getDumpProtoFields().conditionCorrectionNsFieldId.has_value() && isPulled() &&
+        mConditionCorrectionThresholdNs &&
+        (abs(bucket.mConditionCorrectionNs) >= mConditionCorrectionThresholdNs)) {
+        bucketSize += sizeof(int64_t);
+    }
+    return bucketSize;
+}
+
+template <typename AggregatedValue, typename DimExtras>
 void ValueMetricProducer<AggregatedValue, DimExtras>::onStateChanged(
         int64_t eventTimeNs, int32_t atomId, const HashableDimensionKey& primaryKey,
         const FieldValue& oldState, const FieldValue& newState) {
@@ -291,6 +316,7 @@
         const int64_t dumpTimeNs) {
     mPastBuckets.clear();
     mSkippedBuckets.clear();
+    mTotalDataSize = 0;
 }
 
 template <typename AggregatedValue, typename DimExtras>
@@ -324,12 +350,13 @@
 
     protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ID, (long long)mMetricId);
     protoOutput->write(FIELD_TYPE_BOOL | FIELD_ID_IS_ACTIVE, isActiveLocked());
-    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ESTIMATED_MEMORY_BYTES,
-                       (long long)byteSizeLocked());
     if (mPastBuckets.empty() && mSkippedBuckets.empty()) {
         return;
     }
 
+    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ESTIMATED_MEMORY_BYTES,
+                       (long long)byteSizeLocked());
+
     if (StatsdStats::getInstance().hasHitDimensionGuardrail(mMetricId)) {
         protoOutput->write(FIELD_TYPE_BOOL | FIELD_ID_DIMENSION_GUARDRAIL_HIT, true);
     }
@@ -447,6 +474,7 @@
     if (eraseData) {
         mPastBuckets.clear();
         mSkippedBuckets.clear();
+        mTotalDataSize = 0;
     }
 }
 
@@ -822,7 +850,10 @@
             }
 
             auto& bucketList = mPastBuckets[metricDimensionKey];
+            const bool isFirstBucket = bucketList.empty();
             bucketList.push_back(std::move(bucket));
+            mTotalDataSize += computeValueBucketSizeLocked(eventTimeNs >= fullBucketEndTimeNs,
+                    metricDimensionKey, isFirstBucket, bucketList.back() /* bucket was moved */);
         }
         if (!bucketHasData) {
             skipCurrentBucket(eventTimeNs, BucketDropReason::NO_DATA);
@@ -833,6 +864,7 @@
         mCurrentSkippedBucket.bucketStartTimeNs = mCurrentBucketStartTimeNs;
         mCurrentSkippedBucket.bucketEndTimeNs = bucketEndTimeNs;
         mSkippedBuckets.push_back(mCurrentSkippedBucket);
+        mTotalDataSize += computeSkippedBucketSizeLocked(mCurrentSkippedBucket);
     }
 
     // This means that the current bucket was not flushed before a forced bucket split.
diff --git a/statsd/src/metrics/ValueMetricProducer.h b/statsd/src/metrics/ValueMetricProducer.h
index bfde6a0..78761eb 100644
--- a/statsd/src/metrics/ValueMetricProducer.h
+++ b/statsd/src/metrics/ValueMetricProducer.h
@@ -232,6 +232,12 @@
             std::unordered_map<int, std::vector<int>>& deactivationAtomTrackerToMetricMap,
             std::vector<int>& metricsWithActivation) override;
 
+    size_t computeValueBucketSizeLocked(const bool isFullBucket, const MetricDimensionKey& dimKey,
+                                        const bool isFirstBucket,
+                                        const PastBucket<AggregatedValue>& bucket) const;
+
+    virtual size_t getAggregatedValueSize(const AggregatedValue& value) const = 0;
+
     virtual optional<int64_t> getConditionIdForMetric(const StatsdConfig& config,
                                                       const int configIndex) const = 0;
 
diff --git a/statsd/src/stats_log.proto b/statsd/src/stats_log.proto
index 00763de..93b0d8c 100644
--- a/statsd/src/stats_log.proto
+++ b/statsd/src/stats_log.proto
@@ -505,6 +505,7 @@
         optional int32 db_deletion_too_old = 35;
         optional int32 db_deletion_config_removed = 36;
         optional int32 db_deletion_config_updated = 37;
+        optional int32 config_metadata_provider_promotion_failed = 38;
     }
 
     repeated ConfigStats config_stats = 3;
diff --git a/statsd/tests/guardrail/StatsdStats_test.cpp b/statsd/tests/guardrail/StatsdStats_test.cpp
index 86e8093..9232f5b 100644
--- a/statsd/tests/guardrail/StatsdStats_test.cpp
+++ b/statsd/tests/guardrail/StatsdStats_test.cpp
@@ -88,6 +88,20 @@
     EXPECT_FALSE(configReport.has_deletion_time_sec());
 }
 
+TEST(StatsdStatsTest, TestConfigMetadataProviderPromotionFailed) {
+    StatsdStats stats;
+    ConfigKey key(0, 12345);
+    stats.noteConfigReceived(key, /*metricsCount=*/0, /*conditionsCount=*/0, /*matchersCount=*/0,
+                             /*alertCount=*/0, /*annotations=*/{}, nullopt /*valid config*/);
+
+    stats.noteConfigMetadataProviderPromotionFailed(key);
+
+    StatsdStatsReport report = getStatsdStatsReport(stats, /* reset stats */ false);
+    ASSERT_EQ(1, report.config_stats_size());
+    const auto& configReport = report.config_stats(0);
+    EXPECT_EQ(1, configReport.config_metadata_provider_promotion_failed());
+}
+
 TEST(StatsdStatsTest, TestInvalidConfigAdd) {
     StatsdStats stats;
     ConfigKey key(0, 12345);