Snap for 10453938 from e2e0a5cdc8fcdf9b12e1f5d9d399849f1bec12ee to mainline-odp-release

Change-Id: I8d0cc455fada3721a7b541e2ba6022de2c721142
diff --git a/.clangd b/.clangd
new file mode 100644
index 0000000..cf07f21
--- /dev/null
+++ b/.clangd
@@ -0,0 +1,10 @@
+Diagnostics:
+  ClangTidy:
+    # Add: [modernize-*, google-*, android-*, cppcoreguidelines-*]
+    Add:
+      - cppcoreguidelines-init-variables
+      - cppcoreguidelines-pro-type-member-init
+    Remove:
+      - modernize-use-trailing-return-type
+      - modernize-replace-disallow-copy-and-assign-macro
+      - cppcoreguidelines-pro-type-reinterpret-cast
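The .clangd configuration above enables clang-tidy's cppcoreguidelines-init-variables and cppcoreguidelines-pro-type-member-init checks; the initializer fixes later in this patch (e.g. "int64_t size_from_apexd = 0;" and "ErrorCode errorcode{};") are exactly what these checks ask for. A minimal standalone C++ illustration of the enforced style (the names below are illustrative only, not from the patch):

#include <cstdint>

enum class ErrorCode { kSuccess, kError };

struct DownloadState {
  // cppcoreguidelines-pro-type-member-init: every member gets a default
  // initializer so no instance is left with indeterminate fields.
  int64_t bytes_downloaded = 0;
  ErrorCode last_error{ErrorCode::kSuccess};
};

int main() {
  // cppcoreguidelines-init-variables: initialize locals at declaration
  // instead of relying on a later assignment that a refactor might remove.
  int64_t size_from_service = 0;
  ErrorCode code{};
  DownloadState state;
  return (state.bytes_downloaded == 0 && size_from_service == 0 &&
          code == ErrorCode::kSuccess)
             ? 0
             : 1;
}
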
diff --git a/.gitignore b/.gitignore
index 84f29a3..f51c633 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,7 @@
 /update_engine_unittests
 *.pyc
 .vscode
+.git
+.cache
+.xmake
+
diff --git a/Android.bp b/Android.bp
index cace5b6..c321de8 100644
--- a/Android.bp
+++ b/Android.bp
@@ -145,8 +145,8 @@
     shared_libs: ["libprotobuf-cpp-lite"],
 }
 
-cc_library_static {
-    name: "update_metadata-protos",
+cc_defaults {
+    name: "update_metadata-protos-defaults",
     host_supported: true,
     ramdisk_available: true,
     recovery_available: true,
@@ -162,15 +162,35 @@
     },
 }
 
+cc_library_static {
+    name: "update_metadata-protos",
+    defaults: ["update_metadata-protos-defaults"],
+    proto: {
+        type: "lite",
+    },
+}
+
+cc_library_static {
+    name: "update_metadata-protos-full",
+    defaults: ["update_metadata-protos-defaults"],
+}
+
+python_library_host {
+    name: "update_metadata-protos-python",
+    srcs: ["update_metadata.proto"],
+    proto: {
+        canonical_path_from_root: false,
+    },
+}
+
 // libpayload_consumer (type: static_library)
 // ========================================================
 // The payload application component and common dependencies.
 cc_defaults {
-    name: "libpayload_consumer_exports",
-    defaults: ["update_metadata-protos_exports"],
+    name: "libpayload_consumer_exports_defaults",
+    defaults: ["update_metadata-protos_exports",],
 
     static_libs: [
-        "update_metadata-protos",
         "libxz",
         "libbz",
         "libbspatch",
@@ -187,6 +207,7 @@
         "libcow_operation_convert",
         "lz4diff-protos",
         "liblz4patch",
+        "libzstd",
     ],
     shared_libs: [
         "libbase",
@@ -197,12 +218,24 @@
     ],
 }
 
-cc_library_static {
-    name: "libpayload_consumer",
+cc_defaults {
+    name: "libpayload_consumer_exports",
     defaults: [
-        "ue_defaults",
-        "libpayload_consumer_exports",
+        "libpayload_consumer_exports_defaults"
     ],
+    static_libs: ["update_metadata-protos",],
+}
+
+cc_defaults {
+    name: "libpayload_consumer_exports_proto-full",
+    defaults: [
+        "libpayload_consumer_exports_defaults"
+    ],
+    static_libs: ["update_metadata-protos-full",],
+}
+
+cc_defaults {
+    name: "libpayload_consumer_defaults",
     host_supported: true,
     recovery_available: true,
 
@@ -222,7 +255,6 @@
         "common/hwid_override.cc",
         "common/multi_range_http_fetcher.cc",
         "common/prefs.cc",
-        "common/proxy_resolver.cc",
         "common/subprocess.cc",
         "common/terminator.cc",
         "common/utils.cc",
@@ -259,32 +291,78 @@
     ],
 }
 
+cc_library_static {
+    name: "libpayload_consumer",
+    defaults: [
+        "ue_defaults",
+        "libpayload_consumer_exports",
+        "libpayload_consumer_defaults",
+    ],
+}
+
+cc_library_static {
+    name: "libpayload_consumer_proto-full",
+    defaults: [
+        "ue_defaults",
+        "libpayload_consumer_exports_proto-full",
+        "libpayload_consumer_defaults",
+    ],
+}
+
+cc_library_static {
+    name: "libstatslog_ue",
+    generated_sources: ["statslog_ue.cpp"],
+    generated_headers: ["statslog_ue.h"],
+    export_generated_headers: ["statslog_ue.h"],
+    shared_libs: [
+        "libstatssocket",
+    ]
+}
+
+genrule {
+    name: "statslog_ue.h",
+    tools: ["stats-log-api-gen"],
+    cmd: "$(location stats-log-api-gen) --header $(genDir)/statslog_ue.h --module update_engine --namespace chromeos_update_engine,statsd",
+    out: [
+        "statslog_ue.h",
+    ],
+}
+
+genrule {
+    name: "statslog_ue.cpp",
+    tools: ["stats-log-api-gen"],
+    cmd: "$(location stats-log-api-gen) --cpp $(genDir)/statslog_ue.cpp --module update_engine --namespace chromeos_update_engine,statsd --importHeader statslog_ue.h",
+    out: [
+        "statslog_ue.cpp",
+    ],
+}
+
 // libupdate_engine_boot_control (type: static_library)
 // ========================================================
 // A BootControl class implementation using Android's HIDL boot_control HAL.
 cc_defaults {
-    name: "libupdate_engine_boot_control_exports",
-    defaults: ["update_metadata-protos_exports"],
+    name: "libupdate_engine_boot_control_exports_defaults",
+    defaults: ["update_metadata-protos_exports",],
 
     static_libs: [
         "libcutils",
         "libfs_mgr_binder",
         "libgsi",
-        "libpayload_consumer",
         "libsnapshot",
         "libsnapshot_cow",
+        "libstatslog_ue",
         "libz",
-        "update_metadata-protos",
     ],
     shared_libs: [
         "libbootloader_message",
         "libhidlbase",
         "liblp",
-        "libstatslog",
         "libutils",
         "android.hardware.boot@1.0",
         "android.hardware.boot@1.1",
         "android.hardware.boot@1.2",
+        "android.hardware.boot-V1-ndk",
+        "libboot_control_client",
     ],
     header_libs: [
         "avb_headers",
@@ -298,20 +376,38 @@
             exclude_static_libs: [
                 "libfs_mgr_binder",
                 "libsnapshot",
-            ],
-            exclude_shared_libs: [
-                "libstatslog",
+                "libstatslog_ue"
             ],
         },
     },
 }
 
-cc_library_static {
-    name: "libupdate_engine_boot_control",
+cc_defaults {
+    name: "libupdate_engine_boot_control_exports",
+    defaults: [
+        "libupdate_engine_boot_control_exports_defaults",
+    ],
+    static_libs: [
+        "libpayload_consumer",
+        "update_metadata-protos",
+    ]
+}
+
+cc_defaults {
+    name: "libupdate_engine_boot_control_exports_proto-full",
+    defaults: [
+        "libupdate_engine_boot_control_exports_defaults",
+    ],
+    static_libs: [
+        "libpayload_consumer_proto-full",
+        "update_metadata-protos-full",
+    ]
+}
+
+cc_defaults {
+    name: "libupdate_engine_boot_control_defaults",
     defaults: [
         "ue_defaults",
-        "libupdate_engine_boot_control_exports",
-        "libpayload_consumer_exports",
     ],
     recovery_available: true,
 
@@ -323,25 +419,40 @@
     ],
 }
 
+cc_library_static {
+    name: "libupdate_engine_boot_control",
+    defaults: [
+        "libupdate_engine_boot_control_defaults",
+        "libupdate_engine_boot_control_exports",
+        "libpayload_consumer_exports",
+    ],
+}
+
+cc_library_static {
+    name: "libupdate_engine_boot_control_proto-full",
+    defaults: [
+        "libupdate_engine_boot_control_defaults",
+        "libupdate_engine_boot_control_exports_proto-full",
+        "libpayload_consumer_exports_proto-full",
+    ],
+}
+
 // libupdate_engine_android (type: static_library)
 // ========================================================
 // The main daemon static_library used in Android (non-Brillo). This only has a
 // loop to apply payloads provided by the upper layer via a Binder interface.
 cc_defaults {
-    name: "libupdate_engine_android_exports",
+    name: "libupdate_engine_android_exports_defaults",
     defaults: [
         "ue_defaults",
-        "libpayload_consumer_exports",
-        "libupdate_engine_boot_control_exports",
     ],
 
     static_libs: [
         "libavb",
         "libavb_user",
-        "gkiprops",
-        "libpayload_consumer",
-        "libupdate_engine_boot_control",
-        "PlatformProperties",
+        "libgkiprops",
+        "libstatslog_ue",
+        "libPlatformProperties",
     ],
     shared_libs: [
         "apex_aidl_interface-cpp",
@@ -356,25 +467,46 @@
         "libupdate_engine_stable-V1-cpp",
         "liblog",
         "libssl",
-        "libstatslog",
+        "libstatssocket",
         "libutils",
     ],
     whole_static_libs: [
-        "com.android.sysprop.apex",
+        "libcom.android.sysprop.apex",
     ],
 }
 
-cc_library_static {
-    name: "libupdate_engine_android",
+cc_defaults {
+    name: "libupdate_engine_android_exports",
+    defaults: [
+        "libupdate_engine_android_exports_defaults",
+        "libupdate_engine_boot_control_exports",
+        "libpayload_consumer_exports",
+    ],
+    static_libs: [
+        "libpayload_consumer",
+        "libupdate_engine_boot_control",
+    ],
+}
+
+cc_defaults {
+    name: "libupdate_engine_android_exports_proto-full",
+    defaults: [
+        "libupdate_engine_android_exports_defaults",
+        "libupdate_engine_boot_control_exports_proto-full",
+        "libpayload_consumer_exports_proto-full",
+    ],
+    static_libs: [
+        "libpayload_consumer_proto-full",
+        "libupdate_engine_boot_control_proto-full",
+    ],
+}
+
+cc_defaults {
+    name: "libupdate_engine_android_defaults",
     defaults: [
         "ue_defaults",
-        "libupdate_engine_android_exports",
     ],
 
-    // TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
-    // out of the DBus interface.
-    include_dirs: ["external/cros/system_api/dbus"],
-
     aidl: {
         local_include_dirs: ["binder_bindings"],
         export_aidl_headers: true,
@@ -400,6 +532,22 @@
     ],
 }
 
+cc_library_static {
+    name: "libupdate_engine_android",
+    defaults: [
+        "libupdate_engine_android_defaults",
+        "libupdate_engine_android_exports",
+    ]
+}
+
+cc_library_static {
+    name: "libupdate_engine_android_proto-full",
+    defaults: [
+        "libupdate_engine_android_defaults",
+        "libupdate_engine_android_exports_proto-full",
+    ]
+}
+
 // update_engine (type: executable)
 // ========================================================
 // update_engine daemon.
@@ -410,7 +558,10 @@
         "libupdate_engine_android_exports",
     ],
 
-    static_libs: ["libupdate_engine_android"],
+    static_libs: [
+        "libupdate_engine_android",
+        "libgflags",
+    ],
     required: [
         "cacerts_google",
         "otacerts",
@@ -436,9 +587,6 @@
     recovery: true,
 
     cflags: ["-D_UE_SIDELOAD"],
-    // TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
-    // out of the DBus interface.
-    include_dirs: ["external/cros/system_api/dbus"],
     header_libs: ["libgtest_prod_headers"],
 
     srcs: [
@@ -472,7 +620,7 @@
         // We add the static versions of the shared libraries that are not installed to
         // recovery image due to size concerns. Need to include all the static library
         // dependencies of these static libraries.
-        "gkiprops",
+        "libgkiprops",
         "libevent",
         "libmodpb64",
         "libprotobuf-cpp-lite",
@@ -503,10 +651,6 @@
     name: "update_engine_client",
     defaults: ["ue_defaults"],
 
-    // TODO(deymo): Remove external/cros/system_api/dbus once the strings are moved
-    // out of the DBus interface.
-    include_dirs: ["external/cros/system_api/dbus"],
-
     shared_libs: [
         "libbinder",
         "libbinderwrapper",
@@ -556,8 +700,10 @@
         "libpayload_extent_utils",
         "libcow_size_estimator",
         "liberofs",
+        "libselinux",
         "lz4diff-protos",
         "liblz4diff",
+        "libzstd",
     ],
     shared_libs: [
         "libbase",
@@ -715,6 +861,7 @@
         "libavb_host_sysdeps",
         "libpayload_consumer",
         "libpayload_generator",
+        "libgflags",
     ],
 
     srcs: ["payload_generator/generate_delta_main.cc"],
@@ -732,6 +879,7 @@
     static_libs: [
         "libpayload_consumer",
         "libpayload_generator",
+        "libgflags",
     ],
 
     srcs: ["payload_generator/generate_delta_main.cc"],
@@ -927,6 +1075,7 @@
         "libdm",
         "libgmock",
         "libz",
+        "libzstd",
     ],
     shared_libs: [
         "libssl",
@@ -965,17 +1114,14 @@
         "common/subprocess.cc",
         "common/test_utils.cc",
         "common/utils.cc",
-        "common/proxy_resolver.cc",
         "libcurl_http_fetcher.cc",
         "payload_consumer/certificate_parser_android.cc",
         "payload_consumer/payload_verifier.cc",
         "payload_generator/payload_signer.cc",
         "update_status_utils.cc",
-
         "certificate_checker_unittest.cc",
         "common/http_fetcher_unittest.cc",
         "common/mock_http_fetcher.cc",
-        "common/proxy_resolver_unittest.cc",
         "common/subprocess_unittest.cc",
         "libcurl_http_fetcher_unittest.cc",
         "payload_consumer/certificate_parser_android_unittest.cc",
@@ -1123,6 +1269,7 @@
         "libbase",
         "libcow_operation_convert",
         "libcow_size_estimator",
+        "libgflags",
         "libpayload_consumer",
         "libpayload_extent_ranges",
         "libpayload_extent_utils",
diff --git a/aosp/apex_handler_android.cc b/aosp/apex_handler_android.cc
index 8beef96..acc6bec 100644
--- a/aosp/apex_handler_android.cc
+++ b/aosp/apex_handler_android.cc
@@ -22,7 +22,6 @@
 #include <ApexProperties.sysprop.h>
 
 #include "update_engine/aosp/apex_handler_android.h"
-#include "update_engine/common/utils.h"
 
 namespace chromeos_update_engine {
 
@@ -47,7 +46,8 @@
 
 }  // namespace
 
-std::unique_ptr<ApexHandlerInterface> CreateApexHandler() {
+std::unique_ptr<ApexHandlerInterface>
+ApexHandlerInterface::CreateApexHandler() {
   if (android::sysprop::ApexProperties::updatable().value_or(false)) {
     return std::make_unique<ApexHandlerAndroid>();
   } else {
@@ -65,7 +65,7 @@
   }
 
   auto compressed_apex_info_list = CreateCompressedApexInfoList(apex_infos);
-  int64_t size_from_apexd;
+  int64_t size_from_apexd = 0;
   auto result = apex_service->calculateSizeForCompressedApex(
       compressed_apex_info_list, &size_from_apexd);
   if (!result.isOk()) {
diff --git a/aosp/apex_handler_android.h b/aosp/apex_handler_android.h
index 767f561..5aaf49b 100644
--- a/aosp/apex_handler_android.h
+++ b/aosp/apex_handler_android.h
@@ -29,7 +29,6 @@
 
 namespace chromeos_update_engine {
 
-std::unique_ptr<ApexHandlerInterface> CreateApexHandler();
 
 class ApexHandlerAndroid : virtual public ApexHandlerInterface {
  public:
diff --git a/aosp/apex_handler_android_unittest.cc b/aosp/apex_handler_android_unittest.cc
index 8c58e47..dd96256 100644
--- a/aosp/apex_handler_android_unittest.cc
+++ b/aosp/apex_handler_android_unittest.cc
@@ -38,7 +38,7 @@
   result.set_version(version);
   result.set_is_compressed(is_compressed);
   result.set_decompressed_size(decompressed_size);
-  return std::move(result);
+  return result;
 }
 
 TEST(ApexHandlerAndroidTest, CalculateSizeUpdatableApex) {
diff --git a/aosp/apex_handler_interface.h b/aosp/apex_handler_interface.h
index b9b6c96..a0c1e9f 100644
--- a/aosp/apex_handler_interface.h
+++ b/aosp/apex_handler_interface.h
@@ -31,6 +31,7 @@
   virtual android::base::Result<uint64_t> CalculateSize(
       const std::vector<ApexInfo>& apex_infos) const = 0;
   virtual bool AllocateSpace(const std::vector<ApexInfo>& apex_infos) const = 0;
+  static std::unique_ptr<ApexHandlerInterface> CreateApexHandler();
 };
 
 }  // namespace chromeos_update_engine
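The apex_handler changes above move the free function CreateApexHandler() into ApexHandlerInterface as a static member, so callers such as daemon_state_android.cc only need the interface header. A sketch of that static-factory pattern, with the implementation selection reduced to a placeholder (the real code branches on ApexProperties::updatable(), and the fallback class name here is hypothetical):

#include <memory>

class ApexHandlerInterface {
 public:
  virtual ~ApexHandlerInterface() = default;
  // Static factory on the interface: callers create an implementation
  // without seeing any concrete type.
  static std::unique_ptr<ApexHandlerInterface> CreateApexHandler();
};

class ApexHandlerAndroid : public ApexHandlerInterface {};
// Hypothetical fallback implementation, named for the sketch only.
class ApexHandlerStub : public ApexHandlerInterface {};

// Defined in the .cc file next to the concrete classes.
std::unique_ptr<ApexHandlerInterface> ApexHandlerInterface::CreateApexHandler() {
  const bool apex_updatable = false;  // Placeholder for the real sysprop check.
  if (apex_updatable) {
    return std::make_unique<ApexHandlerAndroid>();
  }
  return std::make_unique<ApexHandlerStub>();
}

int main() {
  auto handler = ApexHandlerInterface::CreateApexHandler();
  return handler == nullptr;  // 0 on success.
}
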
diff --git a/aosp/boot_control_android.cc b/aosp/boot_control_android.cc
index 88a9c17..0a1d3de 100644
--- a/aosp/boot_control_android.cc
+++ b/aosp/boot_control_android.cc
@@ -20,28 +20,19 @@
 #include <utility>
 #include <vector>
 
-#include <android/hardware/boot/1.2/IBootControl.h>
 #include <base/bind.h>
 #include <base/logging.h>
 #include <bootloader_message/bootloader_message.h>
 #include <brillo/message_loops/message_loop.h>
 
 #include "update_engine/aosp/dynamic_partition_control_android.h"
-#include "update_engine/common/utils.h"
 
 using std::string;
 
-using android::hardware::Return;
-using android::hardware::boot::V1_0::BoolResult;
-using android::hardware::boot::V1_0::CommandResult;
-using android::hardware::boot::V1_0::IBootControl;
 using Slot = chromeos_update_engine::BootControlInterface::Slot;
 
 namespace {
 
-auto StoreResultCallback(CommandResult* dest) {
-  return [dest](const CommandResult& result) { *dest = result; };
-}
 }  // namespace
 
 namespace chromeos_update_engine {
@@ -59,14 +50,18 @@
 
 }  // namespace boot_control
 
+using android::hal::BootControlClient;
+using android::hal::CommandResult;
+using android::hal::BootControlVersion;
+
 bool BootControlAndroid::Init() {
-  module_ = IBootControl::getService();
+  module_ = BootControlClient::WaitForService();
   if (module_ == nullptr) {
-    LOG(ERROR) << "Error getting bootctrl HIDL module.";
+    LOG(ERROR) << "Error getting bootctrl module.";
     return false;
   }
 
-  LOG(INFO) << "Loaded boot control hidl hal.";
+  LOG(INFO) << "Loaded boot control hal.";
 
   dynamic_control_ =
       std::make_unique<DynamicPartitionControlAndroid>(GetCurrentSlot());
@@ -75,11 +70,11 @@
 }
 
 unsigned int BootControlAndroid::GetNumSlots() const {
-  return module_->getNumberSlots();
+  return module_->GetNumSlots();
 }
 
 BootControlInterface::Slot BootControlAndroid::GetCurrentSlot() const {
-  return module_->getCurrentSlot();
+  return module_->GetCurrentSlot();
 }
 
 bool BootControlAndroid::GetPartitionDevice(const std::string& partition_name,
@@ -103,40 +98,30 @@
 }
 
 bool BootControlAndroid::IsSlotBootable(Slot slot) const {
-  Return<BoolResult> ret = module_->isSlotBootable(slot);
-  if (!ret.isOk()) {
+  const auto ret = module_->IsSlotBootable(slot);
+  if (!ret.has_value()) {
     LOG(ERROR) << "Unable to determine if slot " << SlotName(slot)
-               << " is bootable: " << ret.description();
+               << " is bootable";
     return false;
   }
-  if (ret == BoolResult::INVALID_SLOT) {
-    LOG(ERROR) << "Invalid slot: " << SlotName(slot);
-    return false;
-  }
-  return ret == BoolResult::TRUE;
+  return ret.value();
 }
 
 bool BootControlAndroid::MarkSlotUnbootable(Slot slot) {
-  CommandResult result;
-  auto ret = module_->setSlotAsUnbootable(slot, StoreResultCallback(&result));
-  if (!ret.isOk()) {
+  const auto ret = module_->MarkSlotUnbootable(slot);
+  if (!ret.IsOk()) {
     LOG(ERROR) << "Unable to call MarkSlotUnbootable for slot "
-               << SlotName(slot) << ": " << ret.description();
+               << SlotName(slot) << ": " << ret.errMsg;
     return false;
   }
-  if (!result.success) {
-    LOG(ERROR) << "Unable to mark slot " << SlotName(slot)
-               << " as unbootable: " << result.errMsg.c_str();
-  }
-  return result.success;
+  return ret.success;
 }
 
 bool BootControlAndroid::SetActiveBootSlot(Slot slot) {
-  CommandResult result;
-  auto ret = module_->setActiveBootSlot(slot, StoreResultCallback(&result));
-  if (!ret.isOk()) {
+  const auto result = module_->SetActiveBootSlot(slot);
+  if (!result.IsOk()) {
     LOG(ERROR) << "Unable to call SetActiveBootSlot for slot " << SlotName(slot)
-               << ": " << ret.description();
+               << ": " << result.errMsg;
     return false;
   }
   if (!result.success) {
@@ -148,42 +133,31 @@
 
 bool BootControlAndroid::MarkBootSuccessfulAsync(
     base::Callback<void(bool)> callback) {
-  CommandResult result;
-  auto ret = module_->markBootSuccessful(StoreResultCallback(&result));
-  if (!ret.isOk()) {
-    LOG(ERROR) << "Unable to call MarkBootSuccessful: " << ret.description();
+  auto ret = module_->MarkBootSuccessful();
+  if (!ret.IsOk()) {
+    LOG(ERROR) << "Unable to MarkBootSuccessful: " << ret.errMsg;
     return false;
   }
-  if (!result.success) {
-    LOG(ERROR) << "Unable to mark boot successful: " << result.errMsg.c_str();
-  }
   return brillo::MessageLoop::current()->PostTask(
-             FROM_HERE, base::Bind(callback, result.success)) !=
+             FROM_HERE, base::Bind(callback, ret.success)) !=
          brillo::MessageLoop::kTaskIdNull;
 }
 
 bool BootControlAndroid::IsSlotMarkedSuccessful(
     BootControlInterface::Slot slot) const {
-  Return<BoolResult> ret = module_->isSlotMarkedSuccessful(slot);
+  const auto ret = module_->IsSlotMarkedSuccessful(slot);
   CommandResult result;
-  if (!ret.isOk()) {
+  if (!ret.has_value()) {
     LOG(ERROR) << "Unable to determine if slot " << SlotName(slot)
-               << " is marked successful: " << ret.description();
+               << " is marked successful";
     return false;
   }
-  if (ret == BoolResult::INVALID_SLOT) {
-    LOG(ERROR) << "Invalid slot: " << SlotName(slot);
-    return false;
-  }
-  return ret == BoolResult::TRUE;
+  return ret.value();
 }
 
 Slot BootControlAndroid::GetActiveBootSlot() {
-  namespace V1_2 = android::hardware::boot::V1_2;
-  using android::sp;
-  sp<V1_2::IBootControl> v1_2_module = V1_2::IBootControl::castFrom(module_);
-  if (v1_2_module != nullptr) {
-    return v1_2_module->getActiveBootSlot();
+  if (module_->GetVersion() >= android::hal::BootControlVersion::BOOTCTL_V1_2) {
+    return module_->GetActiveBootSlot();
   }
   LOG(WARNING) << "BootControl module version is lower than 1.2, "
                << __FUNCTION__ << " failed";
diff --git a/aosp/boot_control_android.h b/aosp/boot_control_android.h
index 9012032..51923e2 100644
--- a/aosp/boot_control_android.h
+++ b/aosp/boot_control_android.h
@@ -21,12 +21,12 @@
 #include <memory>
 #include <string>
 
-#include <android/hardware/boot/1.0/IBootControl.h>
 #include <liblp/builder.h>
 #include <gtest/gtest_prod.h>
+#include <BootControlClient.h>
 
 #include "update_engine/aosp/dynamic_partition_control_android.h"
-#include "update_engine/common/boot_control.h"
+#include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/dynamic_partition_control_interface.h"
 
 namespace chromeos_update_engine {
@@ -67,7 +67,7 @@
   DynamicPartitionControlInterface* GetDynamicPartitionControl() override;
 
  private:
-  ::android::sp<::android::hardware::boot::V1_0::IBootControl> module_;
+  std::unique_ptr<android::hal::BootControlClient> module_;
   std::unique_ptr<DynamicPartitionControlAndroid> dynamic_control_;
 
   friend class BootControlAndroidTest;
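The boot_control migration above replaces HIDL Return<BoolResult> values and callback-reported CommandResults with a BootControlClient whose queries return optional-style values and whose mutators return a CommandResult directly. A simplified standalone sketch of the calling pattern (the types and IsOk() semantics are assumptions simplified from android::hal, not the real definitions):

#include <iostream>
#include <optional>
#include <string>

// Simplified stand-in for android::hal::CommandResult.
struct CommandResult {
  bool success = false;
  std::string errMsg;
  bool IsOk() const { return errMsg.empty(); }  // Assumption for the sketch.
};

// Queries return optional-style values instead of HIDL Return<BoolResult>,
// so transport errors and INVALID_SLOT collapse into "no value".
std::optional<bool> IsSlotBootable(unsigned int slot) {
  if (slot > 1) return std::nullopt;
  return true;
}

// Mutations return a CommandResult directly instead of reporting it through
// a callback passed to the HIDL method.
CommandResult MarkSlotUnbootable(unsigned int slot) {
  if (slot > 1) return {false, "invalid slot"};
  return {true, ""};
}

int main() {
  const auto ret = IsSlotBootable(0);
  if (!ret.has_value()) {
    std::cerr << "Unable to determine if slot is bootable\n";
    return 1;
  }
  if (ret.value()) {
    const auto result = MarkSlotUnbootable(0);
    if (!result.IsOk() || !result.success) {
      std::cerr << "MarkSlotUnbootable failed: " << result.errMsg << "\n";
      return 1;
    }
  }
  return 0;
}
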
diff --git a/aosp/cleanup_previous_update_action.cc b/aosp/cleanup_previous_update_action.cc
index 55dba1e..7b0c9bb 100644
--- a/aosp/cleanup_previous_update_action.cc
+++ b/aosp/cleanup_previous_update_action.cc
@@ -25,7 +25,7 @@
 #include <base/bind.h>
 
 #ifndef __ANDROID_RECOVERY__
-#include <statslog.h>
+#include <statslog_ue.h>
 #endif
 
 #include "update_engine/common/utils.h"
@@ -277,7 +277,7 @@
   AcknowledgeTaskExecuted();
   TEST_AND_RETURN(running_);
 
-  auto update_uses_compression = snapshot_->UpdateUsesCompression();
+  snapshot_->SetMergeStatsFeatures(merge_stats_);
 
   // Propagate the merge failure code to the merge stats. If we wait until
   // after ProcessUpdateState, then a successful merge could overwrite the
@@ -290,7 +290,7 @@
   auto state = snapshot_->ProcessUpdateState(
       std::bind(&CleanupPreviousUpdateAction::OnMergePercentageUpdate, this),
       std::bind(&CleanupPreviousUpdateAction::BeforeCancel, this));
-  merge_stats_->set_state(state, update_uses_compression);
+  merge_stats_->set_state(state);
 
   switch (state) {
     case UpdateState::None: {
@@ -328,6 +328,7 @@
 
     case UpdateState::MergeCompleted: {
       LOG(INFO) << "Merge finished with state MergeCompleted.";
+      boot_control_->MarkSlotUnbootable(1 - boot_control_->GetCurrentSlot());
       processor_->ActionComplete(this, ErrorCode::kSuccess);
       return;
     }
@@ -378,10 +379,7 @@
 }
 
 bool CleanupPreviousUpdateAction::BeforeCancel() {
-  if (DeltaPerformer::ResetUpdateProgress(
-          prefs_,
-          false /* quick */,
-          false /* skip dynamic partitions metadata*/)) {
+  if (DeltaPerformer::ResetUpdateProgress(prefs_, false /* quick */)) {
     return true;
   }
 
@@ -434,7 +432,7 @@
 
   LOG(WARNING) << "InitiateMerge failed.";
   auto state = snapshot_->GetUpdateState();
-  merge_stats_->set_state(state, snapshot_->UpdateUsesCompression());
+  merge_stats_->set_state(state);
   if (state == UpdateState::Unverified) {
     // We are stuck at unverified state. This can happen if the update has
     // been applied, but it has not even been attempted yet (in libsnapshot,
@@ -494,6 +492,16 @@
   // DynamicPartitionControlInterface::UpdateUsesSnapshotCompression.
   // However, we have saved the flag in the snapshot report.
   bool vab_compression_used = report.compression_enabled();
+  bool userspace_snapshots_enabled =
+      boot_control_->GetDynamicPartitionControl()
+          ->GetVirtualAbUserspaceSnapshotsFeatureFlag()
+          .IsEnabled();
+  bool userspace_snapshots_used = report.userspace_snapshots_used();
+  bool xor_compression_enabled = boot_control_->GetDynamicPartitionControl()
+                                     ->GetVirtualAbCompressionXorFeatureFlag()
+                                     .IsEnabled();
+  bool xor_compression_used = report.xor_compression_used();
+  bool iouring_used = report.iouring_used();
 
   auto target_build_fingerprint =
       android::base::GetProperty("ro.build.fingerprint", "");
@@ -503,21 +511,26 @@
             << passed_ms.count() << "ms (resumed " << report.resume_count()
             << " times), using " << report.cow_file_size()
             << " bytes of COW image.";
-  android::util::stats_write(android::util::SNAPSHOT_MERGE_REPORTED,
-                             static_cast<int32_t>(report.state()),
-                             static_cast<int64_t>(passed_ms.count()),
-                             static_cast<int32_t>(report.resume_count()),
-                             vab_retrofit,
-                             static_cast<int64_t>(report.cow_file_size()),
-                             vab_compression_enabled,
-                             vab_compression_used,
-                             report.total_cow_size_bytes(),
-                             report.estimated_cow_size_bytes(),
-                             report.boot_complete_time_ms(),
-                             report.boot_complete_to_merge_start_time_ms(),
-                             static_cast<int32_t>(report.merge_failure_code()),
-                             report.source_build_fingerprint().c_str(),
-                             target_build_fingerprint.c_str());
+  statsd::stats_write(statsd::SNAPSHOT_MERGE_REPORTED,
+                      static_cast<int32_t>(report.state()),
+                      static_cast<int64_t>(passed_ms.count()),
+                      static_cast<int32_t>(report.resume_count()),
+                      vab_retrofit,
+                      static_cast<int64_t>(report.cow_file_size()),
+                      vab_compression_enabled,
+                      vab_compression_used,
+                      report.total_cow_size_bytes(),
+                      report.estimated_cow_size_bytes(),
+                      report.boot_complete_time_ms(),
+                      report.boot_complete_to_merge_start_time_ms(),
+                      static_cast<int32_t>(report.merge_failure_code()),
+                      report.source_build_fingerprint().c_str(),
+                      target_build_fingerprint.c_str(),
+                      userspace_snapshots_enabled,
+                      userspace_snapshots_used,
+                      xor_compression_enabled,
+                      xor_compression_used,
+                      iouring_used);
 #endif
 }
 
diff --git a/aosp/cow_converter.cc b/aosp/cow_converter.cc
index 2f93e3a..b9ec00a 100644
--- a/aosp/cow_converter.cc
+++ b/aosp/cow_converter.cc
@@ -27,8 +27,8 @@
 
 #include <base/files/file_path.h>
 #include <libsnapshot/cow_writer.h>
+#include <gflags/gflags.h>
 
-#include "update_engine/common/cow_operation_convert.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
@@ -36,6 +36,10 @@
 #include "update_engine/update_metadata.pb.h"
 
 using android::snapshot::CowWriter;
+DEFINE_string(partitions,
+              "",
+              "Comma separated list of partitions to extract, leave empty for "
+              "extracting all partitions");
 
 namespace chromeos_update_engine {
 
@@ -77,7 +81,15 @@
 using chromeos_update_engine::MetadataParseResult;
 using chromeos_update_engine::PayloadMetadata;
 
-int main(int argc, const char* argv[]) {
+int main(int argc, char* argv[]) {
+  gflags::SetUsageMessage(
+      "A tool to extract device images from Android OTA packages");
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
+  auto tokens = android::base::Tokenize(FLAGS_partitions, ",");
+  const std::set<std::string> partitions(
+      std::make_move_iterator(tokens.begin()),
+      std::make_move_iterator(tokens.end()));
+
   if (argc != 3) {
     printf("Usage: %s <payload.bin> <extracted target_file>\n", argv[0]);
     return -1;
@@ -131,6 +143,10 @@
     if (partition.estimate_cow_size() == 0) {
       continue;
     }
+    if (!partitions.empty() &&
+        partitions.count(partition.partition_name()) == 0) {
+      continue;
+    }
     LOG(INFO) << partition.partition_name();
     if (!ProcessPartition(partition, images_dir, manifest.block_size())) {
       return 6;
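With the gflags change above, cow_converter keeps its two positional arguments and gains an optional partition filter, so an invocation would look something like: cow_converter --partitions=system,product payload.bin extracted_target_files (the flag name comes from the DEFINE_string above; the positional arguments follow the tool's usage string).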
diff --git a/aosp/daemon_state_android.cc b/aosp/daemon_state_android.cc
index da49080..dd659ad 100644
--- a/aosp/daemon_state_android.cc
+++ b/aosp/daemon_state_android.cc
@@ -18,7 +18,7 @@
 
 #include <base/logging.h>
 
-#include "update_engine/aosp/apex_handler_android.h"
+#include "update_engine/aosp/apex_handler_interface.h"
 #include "update_engine/aosp/update_attempter_android.h"
 #include "update_engine/common/boot_control.h"
 #include "update_engine/common/boot_control_stub.h"
@@ -65,11 +65,12 @@
   certificate_checker_->Init();
 
   // Initialize the UpdateAttempter before the UpdateManager.
-  update_attempter_.reset(new UpdateAttempterAndroid(this,
-                                                     prefs_.get(),
-                                                     boot_control_.get(),
-                                                     hardware_.get(),
-                                                     CreateApexHandler()));
+  update_attempter_.reset(
+      new UpdateAttempterAndroid(this,
+                                 prefs_.get(),
+                                 boot_control_.get(),
+                                 hardware_.get(),
+                                 ApexHandlerInterface::CreateApexHandler()));
 
   return true;
 }
diff --git a/aosp/dynamic_partition_control_android.cc b/aosp/dynamic_partition_control_android.cc
index 27d1d54..ca3473c 100644
--- a/aosp/dynamic_partition_control_android.cc
+++ b/aosp/dynamic_partition_control_android.cc
@@ -84,6 +84,8 @@
     "ro.virtual_ab.compression.enabled";
 constexpr auto&& kVirtualAbCompressionXorEnabled =
     "ro.virtual_ab.compression.xor.enabled";
+constexpr char kVirtualAbUserspaceSnapshotsEnabled[] =
+    "ro.virtual_ab.userspace.snapshots.enabled";
 
 // Currently, android doesn't have a retrofit prop for VAB Compression. However,
 // struct FeatureFlag forces us to determine if a feature is 'retrofit'. So this
@@ -98,7 +100,8 @@
 constexpr std::chrono::milliseconds kMapSnapshotTimeout{10000};
 
 DynamicPartitionControlAndroid::~DynamicPartitionControlAndroid() {
-  Cleanup();
+  UnmapAllPartitions();
+  metadata_device_.reset();
 }
 
 static FeatureFlag GetFeatureFlag(const char* enable_prop,
@@ -130,6 +133,8 @@
                                              kVirtualAbCompressionRetrofit)),
       virtual_ab_compression_xor_(
           GetFeatureFlag(kVirtualAbCompressionXorEnabled, "")),
+      virtual_ab_userspace_snapshots_(
+          GetFeatureFlag(kVirtualAbUserspaceSnapshotsEnabled, nullptr)),
       source_slot_(source_slot) {
   if (GetVirtualAbFeatureFlag().IsEnabled()) {
     snapshot_ = SnapshotManager::New();
@@ -312,6 +317,12 @@
 void DynamicPartitionControlAndroid::Cleanup() {
   UnmapAllPartitions();
   metadata_device_.reset();
+  if (GetVirtualAbFeatureFlag().IsEnabled()) {
+    snapshot_ = SnapshotManager::New();
+  } else {
+    snapshot_ = SnapshotManagerStub::New();
+  }
+  CHECK(snapshot_ != nullptr) << "Cannot initialize SnapshotManager.";
 }
 
 bool DynamicPartitionControlAndroid::DeviceExists(const std::string& path) {
@@ -1303,8 +1314,8 @@
   // ResetUpdateProgress may pass but CancelUpdate fails.
   // This is expected. A scheduled CleanupPreviousUpdateAction should free
   // space when it is done.
-  TEST_AND_RETURN_FALSE(DeltaPerformer::ResetUpdateProgress(
-      prefs, false /* quick */, false /* skip dynamic partitions metadata */));
+  TEST_AND_RETURN_FALSE(
+      DeltaPerformer::ResetUpdateProgress(prefs, false /* quick */));
 
   if (ExpectMetadataMounted()) {
     TEST_AND_RETURN_FALSE(snapshot_->CancelUpdate());
@@ -1417,7 +1428,7 @@
 DynamicPartitionControlAndroid::OpenCowWriter(
     const std::string& partition_name,
     const std::optional<std::string>& source_path,
-    bool is_append) {
+    bool) {
   auto suffix = SlotSuffixForSlotNumber(target_slot_);
 
   auto super_device = GetSuperDevice();
@@ -1496,4 +1507,9 @@
          snapshot_->UpdateUsesCompression();
 }
 
+FeatureFlag
+DynamicPartitionControlAndroid::GetVirtualAbUserspaceSnapshotsFeatureFlag() {
+  return virtual_ab_userspace_snapshots_;
+}
+
 }  // namespace chromeos_update_engine
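The new ro.virtual_ab.userspace.snapshots.enabled property above is read through the same GetFeatureFlag() helper as the other Virtual A/B flags. A simplified sketch of deriving a launch/retrofit feature flag from boolean system properties (FeatureFlag and the property reader are stand-ins; the real helper uses android::base::GetBoolProperty and update_engine's FeatureFlag type):

#include <string>

enum class FeatureValue { NONE, LAUNCH, RETROFIT };

// Simplified stand-in for update_engine's FeatureFlag type.
struct FeatureFlag {
  FeatureValue value = FeatureValue::NONE;
  bool IsEnabled() const { return value != FeatureValue::NONE; }
  bool IsRetrofit() const { return value == FeatureValue::RETROFIT; }
};

// Stub standing in for android::base::GetBoolProperty in this sketch.
static bool GetBoolProperty(const std::string& /*key*/, bool default_value) {
  return default_value;
}

FeatureFlag GetFeatureFlag(const char* enable_prop, const char* retrofit_prop) {
  // A null or empty retrofit property means the feature has no retrofit
  // mode (the patch passes nullptr for the userspace-snapshots flag).
  const bool retrofit = retrofit_prop != nullptr && retrofit_prop[0] != '\0' &&
                        GetBoolProperty(retrofit_prop, false);
  const bool enabled = GetBoolProperty(enable_prop, false);
  if (retrofit) return {FeatureValue::RETROFIT};
  if (enabled) return {FeatureValue::LAUNCH};
  return {FeatureValue::NONE};
}

int main() {
  FeatureFlag flag =
      GetFeatureFlag("ro.virtual_ab.userspace.snapshots.enabled", nullptr);
  return flag.IsEnabled() ? 1 : 0;
}
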
diff --git a/aosp/dynamic_partition_control_android.h b/aosp/dynamic_partition_control_android.h
index 92761d2..9851a99 100644
--- a/aosp/dynamic_partition_control_android.h
+++ b/aosp/dynamic_partition_control_android.h
@@ -45,6 +45,7 @@
   FeatureFlag GetVirtualAbFeatureFlag() override;
   FeatureFlag GetVirtualAbCompressionFeatureFlag() override;
   FeatureFlag GetVirtualAbCompressionXorFeatureFlag() override;
+  FeatureFlag GetVirtualAbUserspaceSnapshotsFeatureFlag() override;
   bool OptimizeOperation(const std::string& partition_name,
                          const InstallOperation& operation,
                          InstallOperation* optimized) override;
@@ -341,6 +342,7 @@
   const FeatureFlag virtual_ab_;
   const FeatureFlag virtual_ab_compression_;
   const FeatureFlag virtual_ab_compression_xor_;
+  const FeatureFlag virtual_ab_userspace_snapshots_;
   std::unique_ptr<android::snapshot::ISnapshotManager> snapshot_;
   std::unique_ptr<android::snapshot::AutoDevice> metadata_device_;
   bool target_supports_snapshot_ = false;
diff --git a/aosp/logging_android.cc b/aosp/logging_android.cc
index 5ccf7bc..5940f78 100644
--- a/aosp/logging_android.cc
+++ b/aosp/logging_android.cc
@@ -22,6 +22,7 @@
 #include <algorithm>
 #include <functional>
 #include <iomanip>
+#include <sstream>
 #include <string>
 #include <string_view>
 #include <vector>
@@ -35,6 +36,7 @@
 #include <base/strings/stringprintf.h>
 #include <log/log.h>
 
+#include "android/log.h"
 #include "update_engine/common/utils.h"
 
 using std::string;
@@ -204,8 +206,23 @@
     }
   }
   void operator()(const struct __android_log_message* log_message) {
-    for (auto&& logger : loggers_) {
-      logger(log_message);
+    if (log_message->file != nullptr && log_message->line != 0) {
+      __android_log_message formatted = *log_message;
+      std::stringstream ss;
+      ss << "[" << LogPriorityToCString(formatted.priority) << ":"
+         << formatted.file << "(" << formatted.line << ")] "
+         << formatted.message;
+      formatted.file = nullptr;
+      formatted.line = 0;
+      const auto str = ss.str();
+      formatted.message = str.c_str();
+      for (auto&& logger : loggers_) {
+        logger(&formatted);
+      }
+    } else {
+      for (auto&& logger : loggers_) {
+        logger(log_message);
+      }
     }
   }
 
@@ -248,7 +265,17 @@
   } else {
     // This will eventually be redirected to CombinedLogger.
     // Use nullptr as tag so that liblog infers log tag from getprogname().
-    __android_log_write(priority, nullptr /* tag */, str.c_str());
+    if (file == nullptr || file[0] == 0 || line == 0 || message_start != 0) {
+      __android_log_write(priority, nullptr /* tag */, str.c_str());
+    } else {
+      __android_log_print(priority,
+                          nullptr,
+                          "[%s:%s(%d)] %s",
+                          LogPriorityToCString(priority),
+                          file,
+                          line,
+                          str.c_str());
+    }
   }
   return true;
 }
diff --git a/aosp/metrics_reporter_android.cc b/aosp/metrics_reporter_android.cc
index a324fab..d974616 100644
--- a/aosp/metrics_reporter_android.cc
+++ b/aosp/metrics_reporter_android.cc
@@ -29,7 +29,7 @@
 #include <libdm/dm.h>
 #include <liblp/builder.h>
 #include <liblp/liblp.h>
-#include <statslog.h>
+#include <statslog_ue.h>
 
 #include "update_engine/common/constants.h"
 #include "update_engine/payload_consumer/install_plan.h"
@@ -132,8 +132,8 @@
   bool vab_compression_used =
       dynamic_partition_control_->UpdateUsesSnapshotCompression();
 
-  android::util::stats_write(
-      android::util::UPDATE_ENGINE_UPDATE_ATTEMPT_REPORTED,
+  statsd::stats_write(
+      statsd::UPDATE_ENGINE_UPDATE_ATTEMPT_REPORTED,
       attempt_number,
       GetStatsdEnumValue(static_cast<int32_t>(payload_type)),
       duration.InMinutes(),
@@ -177,17 +177,16 @@
     total_bytes_downloaded += num_bytes_downloaded[i] / kNumBytesInOneMiB;
   }
 
-  android::util::stats_write(
-      android::util::UPDATE_ENGINE_SUCCESSFUL_UPDATE_REPORTED,
-      static_cast<int32_t>(attempt_count),
-      GetStatsdEnumValue(static_cast<int32_t>(payload_type)),
-      static_cast<int32_t>(payload_size_mib),
-      static_cast<int32_t>(total_bytes_downloaded),
-      static_cast<int32_t>(download_overhead_percentage),
-      static_cast<int32_t>(total_duration.InMinutes()),
-      static_cast<int32_t>(reboot_count),
-      IsHashTreeEnabled(install_plan_),
-      IsFECEnabled(install_plan_));
+  statsd::stats_write(statsd::UPDATE_ENGINE_SUCCESSFUL_UPDATE_REPORTED,
+                      static_cast<int32_t>(attempt_count),
+                      GetStatsdEnumValue(static_cast<int32_t>(payload_type)),
+                      static_cast<int32_t>(payload_size_mib),
+                      static_cast<int32_t>(total_bytes_downloaded),
+                      static_cast<int32_t>(download_overhead_percentage),
+                      static_cast<int32_t>(total_duration.InMinutes()),
+                      static_cast<int32_t>(reboot_count),
+                      IsHashTreeEnabled(install_plan_),
+                      IsFECEnabled(install_plan_));
 }
 
 void MetricsReporterAndroid::ReportAbnormallyTerminatedUpdateAttemptMetrics() {
diff --git a/aosp/mock_dynamic_partition_control_android.h b/aosp/mock_dynamic_partition_control_android.h
index f55cdf7..e940e50 100644
--- a/aosp/mock_dynamic_partition_control_android.h
+++ b/aosp/mock_dynamic_partition_control_android.h
@@ -77,6 +77,10 @@
               GetVirtualAbCompressionXorFeatureFlag,
               (),
               (override));
+  MOCK_METHOD(FeatureFlag,
+              GetVirtualAbUserspaceSnapshotsFeatureFlag,
+              (),
+              (override));
   MOCK_METHOD(bool, FinishUpdate, (bool), (override));
   MOCK_METHOD(bool,
               GetSystemOtherPath,
diff --git a/aosp/update_attempter_android.cc b/aosp/update_attempter_android.cc
index a3485ea..5628109 100644
--- a/aosp/update_attempter_android.cc
+++ b/aosp/update_attempter_android.cc
@@ -21,6 +21,7 @@
 #include <memory>
 #include <ostream>
 #include <utility>
+#include <vector>
 
 #include <android-base/properties.h>
 #include <android-base/unique_fd.h>
@@ -33,25 +34,28 @@
 #include <log/log_safetynet.h>
 
 #include "update_engine/aosp/cleanup_previous_update_action.h"
+#include "update_engine/common/clock.h"
 #include "update_engine/common/constants.h"
 #include "update_engine/common/daemon_state_interface.h"
 #include "update_engine/common/download_action.h"
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/file_fetcher.h"
 #include "update_engine/common/metrics_reporter_interface.h"
 #include "update_engine/common/network_selector.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/metrics_utils.h"
-#include "update_engine/payload_consumer/certificate_parser_interface.h"
 #include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/file_descriptor_utils.h"
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
+#include "update_engine/payload_consumer/partition_writer.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
 #include "update_engine/update_boot_flags_action.h"
+#include "update_engine/update_status.h"
 #include "update_engine/update_status_utils.h"
 
 #ifndef _UE_SIDELOAD
@@ -141,7 +145,9 @@
       hardware_(hardware),
       apex_handler_android_(std::move(apex_handler)),
       processor_(new ActionProcessor()),
-      clock_(new Clock()) {
+      clock_(new Clock()),
+      metric_bytes_downloaded_(kPrefsCurrentBytesDownloaded, prefs_),
+      metric_total_bytes_downloaded_(kPrefsTotalBytesDownloaded, prefs_) {
   metrics_reporter_ = metrics::CreateMetricsReporter(
       boot_control_->GetDynamicPartitionControl(), &install_plan_);
   network_selector_ = network::CreateNetworkSelector();
@@ -263,16 +269,9 @@
   install_plan_.is_resume = !payload_id.empty() &&
                             DeltaPerformer::CanResumeUpdate(prefs_, payload_id);
   if (!install_plan_.is_resume) {
-    // No need to reset dynamic_partititon_metadata_updated. If previous calls
-    // to AllocateSpaceForPayload uses the same payload_id, reuse preallocated
-    // space. Otherwise, DeltaPerformer re-allocates space when the payload is
-    // applied.
-    if (!DeltaPerformer::ResetUpdateProgress(
-            prefs_,
-            false /* quick */,
-            true /* skip_dynamic_partititon_metadata_updated */)) {
-      LOG(WARNING) << "Unable to reset the update progress.";
-    }
+    boot_control_->GetDynamicPartitionControl()->Cleanup();
+    boot_control_->GetDynamicPartitionControl()->ResetUpdate(prefs_);
+
     if (!prefs_->SetString(kPrefsUpdateCheckResponseHash, payload_id)) {
       LOG(WARNING) << "Unable to save the update check response hash.";
     }
@@ -283,6 +282,11 @@
   install_plan_.powerwash_required =
       GetHeaderAsBool(headers[kPayloadPropertyPowerwash], false);
 
+  if (!IsProductionBuild()) {
+    install_plan_.disable_vabc =
+        GetHeaderAsBool(headers[kPayloadDisableVABC], false);
+  }
+
   install_plan_.switch_slot_on_reboot =
       GetHeaderAsBool(headers[kPayloadPropertySwitchSlotOnReboot], true);
 
@@ -326,9 +330,14 @@
   } else {
 #ifdef _UE_SIDELOAD
     LOG(FATAL) << "Unsupported sideload URI: " << payload_url;
+    return false;  // NOLINT, unreached but analyzer might not know.
+                   // Suppress warnings about null 'fetcher' after this.
 #else
-    LibcurlHttpFetcher* libcurl_fetcher =
-        new LibcurlHttpFetcher(&proxy_resolver_, hardware_);
+    LibcurlHttpFetcher* libcurl_fetcher = new LibcurlHttpFetcher(hardware_);
+    if (!headers[kPayloadDownloadRetry].empty()) {
+      libcurl_fetcher->set_max_retry_count(
+          atoi(headers[kPayloadDownloadRetry].c_str()));
+    }
     libcurl_fetcher->set_server_to_check(ServerToCheck::kDownload);
     fetcher = libcurl_fetcher;
 #endif  // _UE_SIDELOAD
@@ -339,6 +348,21 @@
   if (!headers[kPayloadPropertyUserAgent].empty())
     fetcher->SetHeader("User-Agent", headers[kPayloadPropertyUserAgent]);
 
+  if (!headers[kPayloadPropertyNetworkProxy].empty()) {
+    LOG(INFO) << "Using proxy url from payload headers: "
+              << headers[kPayloadPropertyNetworkProxy];
+    fetcher->SetProxies({headers[kPayloadPropertyNetworkProxy]});
+  }
+  if (!headers[kPayloadVABCNone].empty()) {
+    install_plan_.vabc_none = true;
+  }
+  if (!headers[kPayloadEnableThreading].empty()) {
+    install_plan_.enable_threading = true;
+  }
+  if (!headers[kPayloadBatchedWrites].empty()) {
+    install_plan_.batched_writes = true;
+  }
+
   BuildUpdateActions(fetcher);
 
   SetStatusAndNotify(UpdateStatus::UPDATE_AVAILABLE);
@@ -404,6 +428,13 @@
     return LogAndSetError(
         error, FROM_HERE, "Already processing an update, cancel it first.");
   }
+  if (status_ != UpdateStatus::IDLE &&
+      status_ != UpdateStatus::UPDATED_NEED_REBOOT) {
+    return LogAndSetError(error,
+                          FROM_HERE,
+                          "Status reset not allowed in this state, please "
+                          "cancel the ongoing OTA first.");
+  }
 
   if (apex_handler_android_ != nullptr) {
     LOG(INFO) << "Cleaning up reserved space for compressed APEX (if any)";
@@ -419,30 +450,18 @@
                           "Failed to reset the status because "
                           "ClearUpdateCompletedMarker() failed");
   }
-
+  if (status_ == UpdateStatus::UPDATED_NEED_REBOOT) {
+    if (!resetShouldSwitchSlotOnReboot(error)) {
+      LOG(INFO) << "Failed to reset slot switch.";
+      return false;
+    }
+    LOG(INFO) << "Slot switch reset successful";
+  }
   if (!boot_control_->GetDynamicPartitionControl()->ResetUpdate(prefs_)) {
     LOG(WARNING) << "Failed to reset snapshots. UpdateStatus is IDLE but"
-                  << "space might not be freed.";
+                 << " space might not be freed.";
   }
-  switch (status_) {
-    case UpdateStatus::IDLE: {
-      return true;
-    }
-
-    case UpdateStatus::UPDATED_NEED_REBOOT: {
-      const bool ret_value = resetShouldSwitchSlotOnReboot(error);
-      if (ret_value) {
-        LOG(INFO) << "Reset status successful";
-      }
-      return ret_value;
-    }
-
-    default:
-      return LogAndSetError(
-          error,
-          FROM_HERE,
-          "Reset not allowed in this state. Cancel the ongoing update first");
-  }
+  return true;
 }
 
 bool UpdateAttempterAndroid::VerifyPayloadParseManifest(
@@ -461,7 +480,7 @@
         FROM_HERE,
         "Failed to read payload header from " + metadata_filename);
   }
-  ErrorCode errorcode;
+  ErrorCode errorcode{};
   PayloadMetadata payload_metadata;
   if (payload_metadata.ParsePayloadHeader(metadata, &errorcode) !=
       MetadataParseResult::kSuccess) {
@@ -520,7 +539,7 @@
       VerifyPayloadParseManifest(metadata_filename, &manifest, error));
 
   FileDescriptorPtr fd(new EintrSafeFileDescriptor);
-  ErrorCode errorcode;
+  ErrorCode errorcode{};
 
   BootControlInterface::Slot current_slot = GetCurrentSlot();
   for (const PartitionUpdate& partition : manifest.partitions()) {
@@ -562,7 +581,9 @@
 void UpdateAttempterAndroid::ProcessingDone(const ActionProcessor* processor,
                                             ErrorCode code) {
   LOG(INFO) << "Processing Done.";
-
+  metric_bytes_downloaded_.Flush(true);
+  metric_total_bytes_downloaded_.Flush(true);
+  last_error_ = code;
   if (status_ == UpdateStatus::CLEANUP_PREVIOUS_UPDATE) {
     TerminateUpdateAndNotify(code);
     return;
@@ -659,14 +680,8 @@
   }
 
   // Update the bytes downloaded in prefs.
-  int64_t current_bytes_downloaded =
-      metrics_utils::GetPersistedValue(kPrefsCurrentBytesDownloaded, prefs_);
-  int64_t total_bytes_downloaded =
-      metrics_utils::GetPersistedValue(kPrefsTotalBytesDownloaded, prefs_);
-  prefs_->SetInt64(kPrefsCurrentBytesDownloaded,
-                   current_bytes_downloaded + bytes_progressed);
-  prefs_->SetInt64(kPrefsTotalBytesDownloaded,
-                   total_bytes_downloaded + bytes_progressed);
+  metric_bytes_downloaded_ += bytes_progressed;
+  metric_total_bytes_downloaded_ += bytes_progressed;
 }
 
 bool UpdateAttempterAndroid::ShouldCancel(ErrorCode* cancel_reason) {
@@ -746,7 +761,7 @@
     prefs_->Delete(kPrefsPayloadAttemptNumber);
     metrics_utils::SetSystemUpdatedMarker(clock_.get(), prefs_);
     // Clear the total bytes downloaded if and only if the update succeeds.
-    prefs_->SetInt64(kPrefsTotalBytesDownloaded, 0);
+    metric_total_bytes_downloaded_.Delete();
   }
 }
 
@@ -806,9 +821,10 @@
 }
 
 bool UpdateAttempterAndroid::WriteUpdateCompletedMarker() {
-  LOG(INFO) << "Writing update complete marker.";
   string boot_id;
   TEST_AND_RETURN_FALSE(utils::GetBootId(&boot_id));
+  LOG(INFO) << "Writing update complete marker, slot "
+            << boot_control_->GetCurrentSlot() << ", boot id: " << boot_id;
   TEST_AND_RETURN_FALSE(
       prefs_->SetString(kPrefsUpdateCompletedOnBootId, boot_id));
   TEST_AND_RETURN_FALSE(
@@ -823,7 +839,7 @@
   return true;
 }
 
-bool UpdateAttempterAndroid::UpdateCompletedOnThisBoot() {
+bool UpdateAttempterAndroid::UpdateCompletedOnThisBoot() const {
   // In case of an update_engine restart without a reboot, we stored the boot_id
   // when the update was completed by setting a pref, so we can check whether
   // the last update was on this boot or a previous one.
@@ -874,8 +890,7 @@
       attempt_result,
       error_code);
 
-  int64_t current_bytes_downloaded =
-      metrics_utils::GetPersistedValue(kPrefsCurrentBytesDownloaded, prefs_);
+  int64_t current_bytes_downloaded = metric_bytes_downloaded_.get();
   metrics_reporter_->ReportUpdateAttemptDownloadMetrics(
       current_bytes_downloaded,
       0,
@@ -892,8 +907,7 @@
     // For android metrics, we only care about the total bytes downloaded
     // for all sources; for now we assume the only download source is
     // HttpsServer.
-    int64_t total_bytes_downloaded =
-        metrics_utils::GetPersistedValue(kPrefsTotalBytesDownloaded, prefs_);
+    int64_t total_bytes_downloaded = metric_total_bytes_downloaded_.get();
     int64_t num_bytes_downloaded[kNumDownloadSources] = {};
     num_bytes_downloaded[DownloadSource::kDownloadSourceHttpsServer] =
         total_bytes_downloaded;
@@ -933,7 +947,9 @@
       prefs_->GetString(kPrefsPreviousVersion, &previous_version));
   if (previous_slot != current_slot) {
     LOG(INFO) << "Detected a slot switch, OTA succeeded, device updated from "
-              << previous_version << " to " << current_version;
+              << previous_version << " to " << current_version
+              << ", previous slot: " << previous_slot
+              << " current slot: " << current_slot;
     if (previous_version == current_version) {
       LOG(INFO) << "Previous version is the same as current version, this is "
                    "possibly a self-OTA.";
@@ -955,7 +971,8 @@
   // We only set |kPrefsSystemUpdatedMarker| if slot is actually switched, so
   // existence of this pref is sufficient indicator. Given that we have to
   // delete this pref after checking it. This is done in
-  // |DeltaPerformer::ResetUpdateProgress|
+  // |DeltaPerformer::ResetUpdateProgress| and
+  // |UpdateAttempterAndroid::UpdateStateAfterReboot|
   auto slot_switch_attempted = prefs_->Exists(kPrefsUpdateCompletedOnBootId);
   auto system_rebooted = DidSystemReboot(prefs_);
   auto ota_successful = OTARebootSucceeded();
@@ -985,6 +1002,11 @@
   string current_boot_id;
   TEST_AND_RETURN(utils::GetBootId(&current_boot_id));
   prefs_->SetString(kPrefsBootId, current_boot_id);
+  std::string slot_switch_indicator;
+  prefs_->GetString(kPrefsUpdateCompletedOnBootId, &slot_switch_indicator);
+  if (slot_switch_indicator != current_boot_id) {
+    ClearUpdateCompletedMarker();
+  }
 
   // If there's no record of previous version (e.g. due to a data wipe), we
   // save the info of current boot and skip the metrics report.
@@ -1056,7 +1078,7 @@
 
 void UpdateAttempterAndroid::ClearMetricsPrefs() {
   CHECK(prefs_);
-  prefs_->Delete(kPrefsCurrentBytesDownloaded);
+  metric_bytes_downloaded_.Delete();
   prefs_->Delete(kPrefsNumReboots);
   prefs_->Delete(kPrefsSystemUpdatedMarker);
   prefs_->Delete(kPrefsUpdateTimestampStart);
@@ -1162,15 +1184,6 @@
   TEST_AND_RETURN_FALSE(
       VerifyPayloadParseManifest(metadata_filename, &manifest, error));
 
-  if (!boot_control_->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
-          GetCurrentSlot(),
-          GetTargetSlot(),
-          manifest,
-          false /* should update */,
-          nullptr)) {
-    return LogAndSetError(
-        error, FROM_HERE, "Failed to PreparePartitionsForUpdate");
-  }
   InstallPlan install_plan_;
   install_plan_.source_slot = GetCurrentSlot();
   install_plan_.target_slot = GetTargetSlot();
@@ -1183,34 +1196,51 @@
   CHECK_NE(install_plan_.source_slot, UINT32_MAX);
   CHECK_NE(install_plan_.target_slot, UINT32_MAX);
 
-  ErrorCode error_code;
-  if (!install_plan_.ParsePartitions(manifest.partitions(),
-                                     boot_control_,
-                                     manifest.block_size(),
-                                     &error_code)) {
-    return LogAndSetError(error,
-                          FROM_HERE,
-                          "Failed to LoadPartitionsFromSlots " +
-                              utils::ErrorCodeToString(error_code));
-  }
-
   auto install_plan_action = std::make_unique<InstallPlanAction>(install_plan_);
-  auto filesystem_verifier_action = std::make_unique<FilesystemVerifierAction>(
-      boot_control_->GetDynamicPartitionControl());
   auto postinstall_runner_action =
       std::make_unique<PostinstallRunnerAction>(boot_control_, hardware_);
   SetStatusAndNotify(UpdateStatus::VERIFYING);
-  filesystem_verifier_action->set_delegate(this);
   postinstall_runner_action->set_delegate(this);
 
-  // Bond them together. We have to use the leaf-types when calling
-  // BondActions().
-  BondActions(install_plan_action.get(), filesystem_verifier_action.get());
-  BondActions(filesystem_verifier_action.get(),
-              postinstall_runner_action.get());
+  // If last error code is kUpdatedButNotActive, we know that we reached this
+  // state by calling applyPayload() with switch_slot=false. That applyPayload()
+  // call would have already performed filesystem verification; therefore, we
+  // can safely skip the verification here to save time.
+  if (last_error_ == ErrorCode::kUpdatedButNotActive) {
+    BondActions(install_plan_action.get(), postinstall_runner_action.get());
+    processor_->EnqueueAction(std::move(install_plan_action));
+  } else {
+    if (!boot_control_->GetDynamicPartitionControl()
+             ->PreparePartitionsForUpdate(GetCurrentSlot(),
+                                          GetTargetSlot(),
+                                          manifest,
+                                          false /* should update */,
+                                          nullptr)) {
+      return LogAndSetError(
+          error, FROM_HERE, "Failed to PreparePartitionsForUpdate");
+    }
+    ErrorCode error_code{};
+    if (!install_plan_.ParsePartitions(manifest.partitions(),
+                                       boot_control_,
+                                       manifest.block_size(),
+                                       &error_code)) {
+      return LogAndSetError(error,
+                            FROM_HERE,
+                            "Failed to LoadPartitionsFromSlots " +
+                                utils::ErrorCodeToString(error_code));
+    }
 
-  processor_->EnqueueAction(std::move(install_plan_action));
-  processor_->EnqueueAction(std::move(filesystem_verifier_action));
+    auto filesystem_verifier_action =
+        std::make_unique<FilesystemVerifierAction>(
+            boot_control_->GetDynamicPartitionControl());
+    filesystem_verifier_action->set_delegate(this);
+    BondActions(install_plan_action.get(), filesystem_verifier_action.get());
+    BondActions(filesystem_verifier_action.get(),
+                postinstall_runner_action.get());
+    processor_->EnqueueAction(std::move(install_plan_action));
+    processor_->EnqueueAction(std::move(filesystem_verifier_action));
+  }
+
   processor_->EnqueueAction(std::move(postinstall_runner_action));
   ScheduleProcessingStart();
   return true;
@@ -1222,6 +1252,7 @@
     return LogAndSetError(
         error, FROM_HERE, "Already processing an update, cancel it first.");
   }
+  TEST_AND_RETURN_FALSE(ClearUpdateCompletedMarker());
   // Update the boot flags so the current slot has higher priority.
   if (!boot_control_->SetActiveBootSlot(GetCurrentSlot())) {
     return LogAndSetError(error, FROM_HERE, "Failed to SetActiveBootSlot");
@@ -1288,4 +1319,13 @@
       end_it, cleanup_previous_update_callbacks_.end());
 }
 
+bool UpdateAttempterAndroid::IsProductionBuild() {
+  return android::base::GetProperty("ro.build.type", "") != "userdebug" ||
+         android::base::GetProperty("ro.build.tags", "") == "release-keys" ||
+         android::base::GetProperty("ro.boot.verifiedbootstate", "") ==
+             "green";
+}
+
 }  // namespace chromeos_update_engine
diff --git a/aosp/update_attempter_android.h b/aosp/update_attempter_android.h
index 5d832e0..c2226b2 100644
--- a/aosp/update_attempter_android.h
+++ b/aosp/update_attempter_android.h
@@ -31,14 +31,14 @@
 #include "update_engine/client_library/include/update_engine/update_status.h"
 #include "update_engine/common/action_processor.h"
 #include "update_engine/common/boot_control_interface.h"
-#include "update_engine/common/clock.h"
+#include "update_engine/common/clock_interface.h"
 #include "update_engine/common/daemon_state_interface.h"
 #include "update_engine/common/download_action.h"
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/metrics_reporter_interface.h"
 #include "update_engine/common/network_selector_interface.h"
 #include "update_engine/common/prefs_interface.h"
-#include "update_engine/common/service_observer_interface.h"
 #include "update_engine/metrics_utils.h"
 #include "update_engine/payload_consumer/filesystem_verifier_action.h"
 #include "update_engine/payload_consumer/postinstall_runner_action.h"
@@ -168,8 +168,8 @@
   // |update_completed_marker_| is empty.
   [[nodiscard]] bool WriteUpdateCompletedMarker();
 
-  // Returns whether an update was completed in the current boot.
-  [[nodiscard]] bool UpdateCompletedOnThisBoot();
+  // Returns whether a slot switch was attempted in the current boot.
+  [[nodiscard]] bool UpdateCompletedOnThisBoot() const;
 
   // Prefs to use for metrics report
   // |kPrefsPayloadAttemptNumber|: number of update attempts for the current
@@ -234,6 +234,8 @@
   void RemoveCleanupPreviousUpdateCallback(
       CleanupSuccessfulUpdateCallbackInterface* callback);
 
+  bool IsProductionBuild();
+
   DaemonStateInterface* daemon_state_;
 
   // DaemonStateAndroid pointers.
@@ -248,9 +250,6 @@
   // set back in the middle of an update.
   base::TimeTicks last_notify_time_;
 
-  // Only direct proxy supported.
-  DirectProxyResolver proxy_resolver_;
-
   // The processor for running Actions.
   std::unique_ptr<ActionProcessor> processor_;
 
@@ -281,6 +280,10 @@
 
   // The path to the zip file with X509 certificates.
   std::string update_certificates_path_{constants::kUpdateCertificatesPath};
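+  // Error code of the last update attempt. If it is kUpdatedButNotActive,
+  // filesystem verification has already passed and may be skipped when the
+  // update is re-applied.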
+  ErrorCode last_error_{ErrorCode::kSuccess};
+
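+  // Bytes downloaded for the current payload, and across all payloads,
+  // persisted to prefs so the values survive a restart of update_engine.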
+  metrics_utils::PersistedValue<int64_t> metric_bytes_downloaded_;
+  metrics_utils::PersistedValue<int64_t> metric_total_bytes_downloaded_;
 
   DISALLOW_COPY_AND_ASSIGN(UpdateAttempterAndroid);
 };
diff --git a/aosp/update_attempter_android_unittest.cc b/aosp/update_attempter_android_unittest.cc
index 458c224..b97ae21 100644
--- a/aosp/update_attempter_android_unittest.cc
+++ b/aosp/update_attempter_android_unittest.cc
@@ -33,28 +33,18 @@
 #include <fs_mgr.h>
 #include <liblp/liblp.h>
 
-#include "update_engine/aosp/boot_control_android.h"
 #include "update_engine/aosp/daemon_state_android.h"
 #include "update_engine/common/constants.h"
 #include "update_engine/common/fake_boot_control.h"
 #include "update_engine/common/fake_clock.h"
 #include "update_engine/common/fake_hardware.h"
 #include "update_engine/common/fake_prefs.h"
-#include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/mock_action_processor.h"
 #include "update_engine/common/mock_metrics_reporter.h"
-#include "update_engine/common/prefs.h"
-#include "update_engine/common/test_utils.h"
-#include "update_engine/common/testing_constants.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/metrics_utils.h"
 #include "update_engine/payload_consumer/install_plan.h"
-#include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/payload_generator/delta_diff_generator.h"
-#include "update_engine/payload_generator/extent_ranges.h"
-#include "update_engine/payload_generator/payload_file.h"
-#include "update_engine/payload_generator/payload_signer.h"
 #include "update_engine/update_metadata.pb.h"
-#include "update_engine/update_status_utils.h"
 
 using base::Time;
 using base::TimeDelta;
diff --git a/common/constants.h b/common/constants.h
index 8c07fcf..004d9d9 100644
--- a/common/constants.h
+++ b/common/constants.h
@@ -172,6 +172,23 @@
 // This can be used to zero-rate OTA traffic by sending it over the correct
 // network.
 static constexpr const auto& kPayloadPropertyNetworkId = "NETWORK_ID";
+
+// Proxy URL to use for downloading the OTA. This is forwarded to libcurl.
+static constexpr const auto& kPayloadPropertyNetworkProxy = "NETWORK_PROXY";
+
+// Set Virtual AB Compression's compression algorithm to "none", but still use
+// userspace snapshots and snapuserd for update installation.
+static constexpr const auto& kPayloadVABCNone = "VABC_NONE";
+// Enable/Disable VABC, falls back on plain VAB
+static constexpr const auto& kPayloadDisableVABC = "DISABLE_VABC";
+// Enable multi-threaded compression for VABC
+static constexpr const auto& kPayloadEnableThreading = "ENABLE_THREADING";
+// Enable batched writes for VABC
+static constexpr const auto& kPayloadBatchedWrites = "BATCHED_WRITES";
+
+// Max retry count for download
+static constexpr const auto& kPayloadDownloadRetry = "DOWNLOAD_RETRY";
+
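+// The properties above are passed to applyPayload() as "KEY=value" strings in
+// its key-value-pair headers argument, e.g. (values are hypothetical):
+//   "NETWORK_PROXY=http://proxy.example.com:8080"
+//   "DISABLE_VABC=1"
+//   "DOWNLOAD_RETRY=5"
+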
 // Set "SWITCH_SLOT_ON_REBOOT=0" to skip marking the updated partitions active.
 // The default is 1 (always switch slot if update succeeded).
 static constexpr const auto& kPayloadPropertySwitchSlotOnReboot =
@@ -191,6 +208,9 @@
 static constexpr const auto& kXGoogleUpdateUpdater = "X-Goog-Update-Updater";
 static constexpr const auto& kXGoogleUpdateSessionId = "X-Goog-SessionId";
 
+// Proxy URL denoting a direct connection (no proxy)
+static constexpr const auto& kNoProxy = "direct://";
+
 // A download source is any combination of protocol and server (that's of
 // interest to us when looking at UMA metrics) using which we may download
 // the payload.
diff --git a/common/cow_operation_convert.cc b/common/cow_operation_convert.cc
index a8f7541..adbfb7d 100644
--- a/common/cow_operation_convert.cc
+++ b/common/cow_operation_convert.cc
@@ -24,23 +24,14 @@
 
 namespace chromeos_update_engine {
 
-namespace {
-
-bool IsConsecutive(const CowOperation& op1, const CowOperation& op2) {
-  return op1.op == op2.op && op1.dst_block + op1.block_count == op2.dst_block &&
-         op1.src_block + op1.block_count == op2.src_block;
-}
-
 void push_back(std::vector<CowOperation>* converted, const CowOperation& op) {
   if (!converted->empty() && IsConsecutive(converted->back(), op)) {
-    converted->back().block_count++;
+    converted->back().block_count += op.block_count;
   } else {
     converted->push_back(op);
   }
 }
 
-}  // namespace
-
 std::vector<CowOperation> ConvertToCowOperations(
     const ::google::protobuf::RepeatedPtrField<
         ::chromeos_update_engine::InstallOperation>& operations,
diff --git a/common/cow_operation_convert.h b/common/cow_operation_convert.h
index 60c820f..a260a4a 100644
--- a/common/cow_operation_convert.h
+++ b/common/cow_operation_convert.h
@@ -29,7 +29,7 @@
     CowCopy = android::snapshot::kCowCopyOp,
     CowReplace = android::snapshot::kCowReplaceOp,
   };
-  Type op;
+  Type op{};
   uint64_t src_block{};
   uint64_t dst_block{};
   uint64_t block_count{1};
@@ -53,5 +53,13 @@
         ::chromeos_update_engine::InstallOperation>& operations,
     const ::google::protobuf::RepeatedPtrField<CowMergeOperation>&
         merge_operations);
+
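+// Returns whether |op2| directly continues |op1|: both have the same type,
+// and |op2|'s source and destination blocks start exactly where |op1|'s
+// extent ends.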
+constexpr bool IsConsecutive(const CowOperation& op1, const CowOperation& op2) {
+  return op1.op == op2.op && op1.dst_block + op1.block_count == op2.dst_block &&
+         op1.src_block + op1.block_count == op2.src_block;
+}
+
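+// Appends |op| to |converted|, extending the previous entry in place when the
+// two operations are consecutive.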
+void push_back(std::vector<CowOperation>* converted, const CowOperation& op);
+
 }  // namespace chromeos_update_engine
 #endif
diff --git a/common/download_action.h b/common/download_action.h
index ee6c8be..dd73a9d 100644
--- a/common/download_action.h
+++ b/common/download_action.h
@@ -25,7 +25,6 @@
 #include <string>
 #include <utility>
 
-#include "update_engine/common/action.h"
 #include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/http_fetcher.h"
 #include "update_engine/common/multi_range_http_fetcher.h"
diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h
index 2c01b1a..3f735bb 100644
--- a/common/dynamic_partition_control_interface.h
+++ b/common/dynamic_partition_control_interface.h
@@ -75,6 +75,9 @@
   virtual FeatureFlag GetVirtualAbCompressionFeatureFlag() = 0;
   // Return the feature flag for Virtual AB Compression XOR
   virtual FeatureFlag GetVirtualAbCompressionXorFeatureFlag() = 0;
+  // Returns whether userspace snapshots are enabled on the device, but not
+  // whether they're enabled for the update.
+  virtual FeatureFlag GetVirtualAbUserspaceSnapshotsFeatureFlag() = 0;
 
   // Attempt to optimize |operation|.
   // If successful, |optimized| contains an operation with extents that
diff --git a/common/dynamic_partition_control_stub.cc b/common/dynamic_partition_control_stub.cc
index 6283b1d..03f7361 100644
--- a/common/dynamic_partition_control_stub.cc
+++ b/common/dynamic_partition_control_stub.cc
@@ -38,7 +38,13 @@
   return FeatureFlag(FeatureFlag::Value::NONE);
 }
 
-FeatureFlag DynamicPartitionControlStub::GetVirtualAbCompressionXorFeatureFlag() {
+FeatureFlag
+DynamicPartitionControlStub::GetVirtualAbCompressionXorFeatureFlag() {
+  return FeatureFlag(FeatureFlag::Value::NONE);
+}
+
+FeatureFlag
+DynamicPartitionControlStub::GetVirtualAbUserspaceSnapshotsFeatureFlag() {
   return FeatureFlag(FeatureFlag::Value::NONE);
 }
 
diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h
index 15137d2..4236051 100644
--- a/common/dynamic_partition_control_stub.h
+++ b/common/dynamic_partition_control_stub.h
@@ -33,6 +33,7 @@
   FeatureFlag GetVirtualAbFeatureFlag() override;
   FeatureFlag GetVirtualAbCompressionFeatureFlag() override;
   FeatureFlag GetVirtualAbCompressionXorFeatureFlag() override;
+  FeatureFlag GetVirtualAbUserspaceSnapshotsFeatureFlag() override;
   bool OptimizeOperation(const std::string& partition_name,
                          const InstallOperation& operation,
                          InstallOperation* optimized) override;
diff --git a/common/file_fetcher.h b/common/file_fetcher.h
index 0f034e3..cc0e880 100644
--- a/common/file_fetcher.h
+++ b/common/file_fetcher.h
@@ -37,7 +37,7 @@
   // Returns whether the passed url is supported.
   static bool SupportedUrl(const std::string& url);
 
-  FileFetcher() : HttpFetcher(nullptr) {}
+  FileFetcher() : HttpFetcher() {}
 
   // Cleans up all internal state. Does not notify delegate.
   ~FileFetcher() override;
diff --git a/common/http_fetcher.cc b/common/http_fetcher.cc
index 5a98dfc..34caba4 100644
--- a/common/http_fetcher.cc
+++ b/common/http_fetcher.cc
@@ -25,9 +25,7 @@
 
 namespace chromeos_update_engine {
 
-HttpFetcher::~HttpFetcher() {
-  CancelProxyResolution();
-}
+HttpFetcher::~HttpFetcher() {}
 
 void HttpFetcher::SetPostData(const void* data,
                               size_t size,
@@ -43,51 +41,4 @@
   SetPostData(data, size, kHttpContentTypeUnspecified);
 }
 
-// Proxy methods to set the proxies, then to pop them off.
-void HttpFetcher::ResolveProxiesForUrl(const string& url,
-                                       const Closure& callback) {
-  CHECK_EQ(static_cast<Closure*>(nullptr), callback_.get());
-  callback_.reset(new Closure(callback));
-
-  if (!proxy_resolver_) {
-    LOG(INFO) << "Not resolving proxies (no proxy resolver).";
-    no_resolver_idle_id_ = MessageLoop::current()->PostTask(
-        FROM_HERE,
-        base::Bind(&HttpFetcher::NoProxyResolverCallback,
-                   base::Unretained(this)));
-    return;
-  }
-  proxy_request_ = proxy_resolver_->GetProxiesForUrl(
-      url, base::Bind(&HttpFetcher::ProxiesResolved, base::Unretained(this)));
-}
-
-void HttpFetcher::NoProxyResolverCallback() {
-  no_resolver_idle_id_ = MessageLoop::kTaskIdNull;
-  ProxiesResolved(deque<string>());
-}
-
-void HttpFetcher::ProxiesResolved(const deque<string>& proxies) {
-  proxy_request_ = kProxyRequestIdNull;
-  if (!proxies.empty())
-    SetProxies(proxies);
-  CHECK(callback_.get()) << "ProxiesResolved but none pending.";
-  Closure* callback = callback_.release();
-  // This may indirectly call back into ResolveProxiesForUrl():
-  callback->Run();
-  delete callback;
-}
-
-bool HttpFetcher::CancelProxyResolution() {
-  bool ret = false;
-  if (no_resolver_idle_id_ != MessageLoop::kTaskIdNull) {
-    ret = MessageLoop::current()->CancelTask(no_resolver_idle_id_);
-    no_resolver_idle_id_ = MessageLoop::kTaskIdNull;
-  }
-  if (proxy_request_ && proxy_resolver_) {
-    ret = proxy_resolver_->CancelProxyRequest(proxy_request_) || ret;
-    proxy_request_ = kProxyRequestIdNull;
-  }
-  return ret;
-}
-
 }  // namespace chromeos_update_engine
diff --git a/common/http_fetcher.h b/common/http_fetcher.h
index 80985af..f32c01d 100644
--- a/common/http_fetcher.h
+++ b/common/http_fetcher.h
@@ -26,10 +26,11 @@
 #include <base/logging.h>
 #include <base/macros.h>
 #include <brillo/message_loops/message_loop.h>
+#include <brillo/secure_blob.h>
 
+#include "update_engine/common/constants.h"
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/http_common.h"
-#include "update_engine/common/metrics_constants.h"
-#include "update_engine/common/proxy_resolver.h"
 
 // This class is a simple wrapper around an HTTP library (libcurl). We can
 // easily mock out this interface for testing.
@@ -46,12 +47,11 @@
-  // |proxy_resolver| is the resolver that will be consulted for proxy
-  // settings. It may be null, in which case direct connections will
-  // be used. Does not take ownership of the resolver.
-  explicit HttpFetcher(ProxyResolver* proxy_resolver)
+  explicit HttpFetcher()
       : post_data_set_(false),
         http_response_code_(0),
         delegate_(nullptr),
         proxies_(1, kNoProxy),
-        proxy_resolver_(proxy_resolver),
         callback_(nullptr) {}
   virtual ~HttpFetcher();
 
@@ -73,11 +73,7 @@
   // Same without a specified Content-Type.
   void SetPostData(const void* data, size_t size);
 
-  // Proxy methods to set the proxies, then to pop them off.
-  void ResolveProxiesForUrl(const std::string& url,
-                            const base::Closure& callback);
-
-  void SetProxies(const std::deque<std::string>& proxies) {
+  virtual void SetProxies(const std::deque<std::string>& proxies) {
     proxies_ = proxies;
   }
   const std::string& GetCurrentProxy() const { return proxies_.front(); }
@@ -144,21 +140,14 @@
   // Get the total number of bytes downloaded by fetcher.
   virtual size_t GetBytesDownloaded() = 0;
 
-  ProxyResolver* proxy_resolver() const { return proxy_resolver_; }
-
  protected:
-  // Cancels a proxy resolution in progress. The callback passed to
-  // ResolveProxiesForUrl() will not be called. Returns whether there was a
-  // pending proxy resolution to be canceled.
-  bool CancelProxyResolution();
-
   // The URL we're actively fetching from
   std::string url_;
 
   // POST data for the transfer, and whether or not it was ever set
   bool post_data_set_;
   brillo::Blob post_data_;
-  HttpContentType post_content_type_;
+  HttpContentType post_content_type_{};
 
   // The server's HTTP response code from the last transfer. This
   // field should be set to 0 when a new transfer is initiated, and
@@ -175,12 +164,6 @@
   // Proxy servers
   std::deque<std::string> proxies_;
 
-  ProxyResolver* const proxy_resolver_;
-
-  // The ID of the idle callback, used when we have no proxy resolver.
-  brillo::MessageLoop::TaskId no_resolver_idle_id_{
-      brillo::MessageLoop::kTaskIdNull};
-
   // Callback for when we are resolving proxies
   std::unique_ptr<base::Closure> callback_;
 
@@ -192,10 +175,6 @@
   // |proxy_resolver_|.
   void NoProxyResolverCallback();
 
-  // Stores the ongoing proxy request id if there is one, otherwise
-  // kProxyRequestIdNull.
-  ProxyRequestId proxy_request_{kProxyRequestIdNull};
-
   DISALLOW_COPY_AND_ASSIGN(HttpFetcher);
 };
 
diff --git a/common/http_fetcher_unittest.cc b/common/http_fetcher_unittest.cc
index bc8a325..06f3e15 100644
--- a/common/http_fetcher_unittest.cc
+++ b/common/http_fetcher_unittest.cc
@@ -20,6 +20,7 @@
 #include <unistd.h>
 
 #include <algorithm>
+#include <deque>
 #include <memory>
 #include <string>
 #include <utility>
@@ -49,15 +50,14 @@
 #endif  // __CHROMEOS__
 #include <brillo/streams/file_stream.h>
 #include <brillo/streams/stream.h>
+#include <gmock/gmock.h>
 #include <gtest/gtest.h>
 
 #include "update_engine/common/fake_hardware.h"
 #include "update_engine/common/file_fetcher.h"
 #include "update_engine/common/http_common.h"
 #include "update_engine/common/mock_http_fetcher.h"
-#include "update_engine/common/mock_proxy_resolver.h"
 #include "update_engine/common/multi_range_http_fetcher.h"
-#include "update_engine/common/proxy_resolver.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/libcurl_http_fetcher.h"
@@ -141,7 +141,7 @@
     vector<char> buf(128);
     string line;
     while (line.find('\n') == string::npos) {
-      size_t read;
+      size_t read{};
       if (!stdout->ReadBlocking(buf.data(), buf.size(), &read, nullptr)) {
         ADD_FAILURE() << "error reading http server stdout";
         return;
@@ -200,19 +200,16 @@
   AnyHttpFetcherFactory() {}
   virtual ~AnyHttpFetcherFactory() {}
 
-  virtual HttpFetcher* NewLargeFetcher(ProxyResolver* proxy_resolver) = 0;
+  virtual HttpFetcher* NewLargeFetcher() = 0;
   HttpFetcher* NewLargeFetcher(size_t num_proxies) {
-    proxy_resolver_.set_num_proxies(num_proxies);
-    return NewLargeFetcher(&proxy_resolver_);
-  }
-  HttpFetcher* NewLargeFetcher() { return NewLargeFetcher(1); }
+    auto res = NewLargeFetcher();
 
-  virtual HttpFetcher* NewSmallFetcher(ProxyResolver* proxy_resolver) = 0;
-  HttpFetcher* NewSmallFetcher() {
-    proxy_resolver_.set_num_proxies(1);
-    return NewSmallFetcher(&proxy_resolver_);
+    res->SetProxies(std::deque<std::string>(num_proxies, kNoProxy));
+    return res;
   }
 
+  virtual HttpFetcher* NewSmallFetcher() = 0;
+
   virtual string BigUrl(in_port_t port) const { return kUnusedUrl; }
   virtual string SmallUrl(in_port_t port) const { return kUnusedUrl; }
   virtual string ErrorUrl(in_port_t port) const { return kUnusedUrl; }
@@ -229,7 +226,6 @@
   FakeHardware* fake_hardware() { return &fake_hardware_; }
 
  protected:
-  DirectProxyResolver proxy_resolver_;
   FakeHardware fake_hardware_;
 };
 
@@ -237,16 +233,15 @@
  public:
   // Necessary to unhide the definition in the base class.
   using AnyHttpFetcherFactory::NewLargeFetcher;
-  HttpFetcher* NewLargeFetcher(ProxyResolver* proxy_resolver) override {
+  HttpFetcher* NewLargeFetcher() override {
     brillo::Blob big_data(1000000);
-    return new MockHttpFetcher(
-        big_data.data(), big_data.size(), proxy_resolver);
+    return new MockHttpFetcher(big_data.data(), big_data.size());
   }
 
   // Necessary to unhide the definition in the base class.
   using AnyHttpFetcherFactory::NewSmallFetcher;
-  HttpFetcher* NewSmallFetcher(ProxyResolver* proxy_resolver) override {
-    return new MockHttpFetcher("x", 1, proxy_resolver);
+  HttpFetcher* NewSmallFetcher() override {
+    return new MockHttpFetcher("x", 1);
   }
 
   bool IsMock() const override { return true; }
@@ -261,9 +256,8 @@
  public:
   // Necessary to unhide the definition in the base class.
   using AnyHttpFetcherFactory::NewLargeFetcher;
-  HttpFetcher* NewLargeFetcher(ProxyResolver* proxy_resolver) override {
-    LibcurlHttpFetcher* ret =
-        new LibcurlHttpFetcher(proxy_resolver, &fake_hardware_);
+  HttpFetcher* NewLargeFetcher() override {
+    LibcurlHttpFetcher* ret = new LibcurlHttpFetcher(&fake_hardware_);
     // Speed up test execution.
     ret->set_idle_seconds(1);
     ret->set_retry_seconds(1);
@@ -273,9 +267,8 @@
 
   // Necessary to unhide the definition in the base class.
   using AnyHttpFetcherFactory::NewSmallFetcher;
-  HttpFetcher* NewSmallFetcher(ProxyResolver* proxy_resolver) override {
-    return NewLargeFetcher(proxy_resolver);
-  }
+  HttpFetcher* NewSmallFetcher() override { return NewLargeFetcher(); }
 
   string BigUrl(in_port_t port) const override {
     return LocalServerUrlForPath(
@@ -304,9 +297,9 @@
  public:
   // Necessary to unhide the definition in the base class.
   using AnyHttpFetcherFactory::NewLargeFetcher;
-  HttpFetcher* NewLargeFetcher(ProxyResolver* proxy_resolver) override {
-    MultiRangeHttpFetcher* ret = new MultiRangeHttpFetcher(
-        new LibcurlHttpFetcher(proxy_resolver, &fake_hardware_));
+  HttpFetcher* NewLargeFetcher() override {
+    MultiRangeHttpFetcher* ret =
+        new MultiRangeHttpFetcher(new LibcurlHttpFetcher(&fake_hardware_));
     ret->ClearRanges();
     ret->AddRange(0);
     // Speed up test execution.
@@ -318,9 +311,7 @@
 
   // Necessary to unhide the definition in the base class.
   using AnyHttpFetcherFactory::NewSmallFetcher;
-  HttpFetcher* NewSmallFetcher(ProxyResolver* proxy_resolver) override {
-    return NewLargeFetcher(proxy_resolver);
-  }
+  HttpFetcher* NewSmallFetcher() override { return NewLargeFetcher(); }
 
   bool IsMulti() const override { return true; }
 };
@@ -329,15 +320,11 @@
  public:
   // Necessary to unhide the definition in the base class.
   using AnyHttpFetcherFactory::NewLargeFetcher;
-  HttpFetcher* NewLargeFetcher(ProxyResolver* /* proxy_resolver */) override {
-    return new FileFetcher();
-  }
+  HttpFetcher* NewLargeFetcher() override { return new FileFetcher(); }
 
   // Necessary to unhide the definition in the base class.
   using AnyHttpFetcherFactory::NewSmallFetcher;
-  HttpFetcher* NewSmallFetcher(ProxyResolver* proxy_resolver) override {
-    return NewLargeFetcher(proxy_resolver);
-  }
+  HttpFetcher* NewSmallFetcher() override { return NewLargeFetcher(); }
 
   string BigUrl(in_port_t port) const override {
     static string big_contents = []() {
@@ -378,7 +365,7 @@
  public:
   // Necessary to unhide the definition in the base class.
   using AnyHttpFetcherFactory::NewLargeFetcher;
-  HttpFetcher* NewLargeFetcher(ProxyResolver* /* proxy_resolver */) override {
+  HttpFetcher* NewLargeFetcher() override {
     MultiRangeHttpFetcher* ret = new MultiRangeHttpFetcher(new FileFetcher());
     ret->ClearRanges();
     // FileFetcher doesn't support range with unspecified length.
@@ -392,9 +379,7 @@
 
   // Necessary to unhide the definition in the base class.
   using AnyHttpFetcherFactory::NewSmallFetcher;
-  HttpFetcher* NewSmallFetcher(ProxyResolver* proxy_resolver) override {
-    return NewLargeFetcher(proxy_resolver);
-  }
+  HttpFetcher* NewSmallFetcher() override { return NewLargeFetcher(); }
 
   bool IsMulti() const override { return true; }
 };
@@ -636,7 +621,7 @@
   unique_ptr<HttpServer> server(this->test_.CreateServer());
   ASSERT_TRUE(server->started_);
 
-  MessageLoop::TaskId callback_id;
+  MessageLoop::TaskId callback_id{};
   callback_id = this->loop_.PostDelayedTask(
       FROM_HERE,
       base::Bind(&UnpausingTimeoutCallback, &delegate, &callback_id),
@@ -652,23 +637,16 @@
 TYPED_TEST(HttpFetcherTest, PauseWhileResolvingProxyTest) {
   if (this->test_.IsMock() || !this->test_.IsHttpSupported())
     return;
-  MockProxyResolver mock_resolver;
-  unique_ptr<HttpFetcher> fetcher(this->test_.NewLargeFetcher(&mock_resolver));
+  unique_ptr<HttpFetcher> fetcher(this->test_.NewLargeFetcher());
 
-  // Saved arguments from the proxy call.
-  ProxiesResolvedFn proxy_callback;
-  EXPECT_CALL(mock_resolver, GetProxiesForUrl("http://fake_url", _))
-      .WillOnce(DoAll(SaveArg<1>(&proxy_callback), Return(true)));
   fetcher->BeginTransfer("http://fake_url");
-  testing::Mock::VerifyAndClearExpectations(&mock_resolver);
 
   // Pausing and unpausing while resolving the proxy should not affect anything.
   fetcher->Pause();
   fetcher->Unpause();
   fetcher->Pause();
-  // Proxy resolver comes back after we paused the fetcher.
-  ASSERT_FALSE(proxy_callback.is_null());
-  proxy_callback.Run({1, kNoProxy});
 }
 
 class AbortingHttpFetcherTestDelegate : public HttpFetcherDelegate {
@@ -740,21 +718,16 @@
 TYPED_TEST(HttpFetcherTest, TerminateTransferWhileResolvingProxyTest) {
   if (this->test_.IsMock() || !this->test_.IsHttpSupported())
     return;
-  MockProxyResolver mock_resolver;
-  unique_ptr<HttpFetcher> fetcher(this->test_.NewLargeFetcher(&mock_resolver));
+  unique_ptr<HttpFetcher> fetcher(this->test_.NewLargeFetcher());
 
   HttpFetcherTestDelegate delegate;
   fetcher->set_delegate(&delegate);
 
-  EXPECT_CALL(mock_resolver, GetProxiesForUrl(_, _)).WillOnce(Return(123));
   fetcher->BeginTransfer("http://fake_url");
-  // Run the message loop until idle. This must call the MockProxyResolver with
-  // the request.
+  // Run the message loop until idle.
   while (this->loop_.RunOnce(false)) {
   }
-  testing::Mock::VerifyAndClearExpectations(&mock_resolver);
-
-  EXPECT_CALL(mock_resolver, CancelProxyRequest(123)).WillOnce(Return(true));
 
-  // Terminate the transfer right before the proxy resolution response.
+  // Terminate the transfer before any data is received.
   fetcher->TerminateTransfer();
@@ -1300,7 +1273,7 @@
   vector<pair<off_t, off_t>> ranges;
   ranges.push_back(make_pair(0, 25));
   ranges.push_back(make_pair(99, 0));
-  MultiTest(this->test_.NewLargeFetcher(2),
+  MultiTest(this->test_.NewLargeFetcher(),
             this->test_.fake_hardware(),
             LocalServerUrlForPath(
                 server->GetPort(),
diff --git a/common/metrics_constants.h b/common/metrics_constants.h
index b7633b9..af40300 100644
--- a/common/metrics_constants.h
+++ b/common/metrics_constants.h
@@ -17,6 +17,7 @@
 #ifndef UPDATE_ENGINE_COMMON_METRICS_CONSTANTS_H_
 #define UPDATE_ENGINE_COMMON_METRICS_CONSTANTS_H_
 
+#include <chrono>
+
 namespace chromeos_update_engine {
 
 namespace metrics {
@@ -140,6 +141,8 @@
   kNumConstants
 };
 
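+// How often in-memory metric values are flushed to disk.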
+constexpr auto kMetricFlushInterval = std::chrono::seconds(3);
+
 }  // namespace metrics
 
 }  // namespace chromeos_update_engine
diff --git a/common/mock_dynamic_partition_control.h b/common/mock_dynamic_partition_control.h
index fd0a5a9..8cff28f 100644
--- a/common/mock_dynamic_partition_control.h
+++ b/common/mock_dynamic_partition_control.h
@@ -38,6 +38,10 @@
               GetVirtualAbCompressionXorFeatureFlag,
               (),
               (override));
+  MOCK_METHOD(FeatureFlag,
+              GetVirtualAbUserspaceSnapshotsFeatureFlag,
+              (),
+              (override));
   MOCK_METHOD(FeatureFlag, GetVirtualAbFeatureFlag, (), (override));
   MOCK_METHOD(bool, FinishUpdate, (bool), (override));
   MOCK_METHOD(std::unique_ptr<FileDescriptor>,
diff --git a/common/mock_http_fetcher.h b/common/mock_http_fetcher.h
index 3d7859b..0d4b130 100644
--- a/common/mock_http_fetcher.h
+++ b/common/mock_http_fetcher.h
@@ -42,10 +42,8 @@
  public:
   // The data passed in here is copied and then passed to the delegate after
   // the transfer begins.
-  MockHttpFetcher(const uint8_t* data,
-                  size_t size,
-                  ProxyResolver* proxy_resolver)
-      : HttpFetcher(proxy_resolver),
+  MockHttpFetcher(const uint8_t* data, size_t size)
+      : HttpFetcher(),
         sent_offset_(0),
         timeout_id_(brillo::MessageLoop::kTaskIdNull),
         paused_(false),
@@ -55,9 +53,8 @@
   }
 
   // Constructor overload for string data.
-  MockHttpFetcher(const char* data, size_t size, ProxyResolver* proxy_resolver)
-      : MockHttpFetcher(
-            reinterpret_cast<const uint8_t*>(data), size, proxy_resolver) {}
+  MockHttpFetcher(const char* data, size_t size)
+      : MockHttpFetcher(reinterpret_cast<const uint8_t*>(data), size) {}
 
   // Cleans up all internal state. Does not notify delegate
   ~MockHttpFetcher() override;
diff --git a/common/mock_proxy_resolver.h b/common/mock_proxy_resolver.h
deleted file mode 100644
index 67de68f..0000000
--- a/common/mock_proxy_resolver.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//
-// Copyright (C) 2016 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
-#define UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
-
-#include <string>
-
-#include <gmock/gmock.h>
-
-#include "update_engine/common/proxy_resolver.h"
-
-namespace chromeos_update_engine {
-
-class MockProxyResolver : public ProxyResolver {
- public:
-  MOCK_METHOD2(GetProxiesForUrl,
-               ProxyRequestId(const std::string& url,
-                              const ProxiesResolvedFn& callback));
-  MOCK_METHOD1(CancelProxyRequest, bool(ProxyRequestId request));
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_COMMON_MOCK_PROXY_RESOLVER_H_
diff --git a/common/multi_range_http_fetcher.h b/common/multi_range_http_fetcher.h
index ef32f0d..849ed32 100644
--- a/common/multi_range_http_fetcher.h
+++ b/common/multi_range_http_fetcher.h
@@ -46,7 +46,7 @@
  public:
   // Takes ownership of the passed in fetcher.
   explicit MultiRangeHttpFetcher(HttpFetcher* base_fetcher)
-      : HttpFetcher(base_fetcher->proxy_resolver()),
+      : HttpFetcher(),
         base_fetcher_(base_fetcher),
         base_fetcher_active_(false),
         pending_transfer_ended_(false),
@@ -101,7 +101,8 @@
   }
-  // TODO(deymo): Determine if this method should be virtual in HttpFetcher so
-  // this call is sent to the base_fetcher_.
-  virtual void SetProxies(const std::deque<std::string>& proxies) {
+  void SetProxies(const std::deque<std::string>& proxies) override {
+    HttpFetcher::SetProxies(proxies);
     base_fetcher_->SetProxies(proxies);
   }
 
diff --git a/common/prefs.cc b/common/prefs.cc
index f33a8a9..a070302 100644
--- a/common/prefs.cc
+++ b/common/prefs.cc
@@ -202,8 +202,8 @@
     // to parent directories where we might not have permission to write to.
     TEST_AND_RETURN_FALSE(base::CreateDirectory(filename.DirName()));
   }
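+  // Write atomically (temp file + fsync + rename) so that a crash mid-write
+  // cannot leave a truncated pref behind.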
-  TEST_AND_RETURN_FALSE(base::WriteFile(filename, value.data(), value.size()) ==
-                        static_cast<int>(value.size()));
+  TEST_AND_RETURN_FALSE(
+      utils::WriteStringToFileAtomic(filename.value(), value));
   return true;
 }
 
diff --git a/common/proxy_resolver.cc b/common/proxy_resolver.cc
deleted file mode 100644
index 0591c3e..0000000
--- a/common/proxy_resolver.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/common/proxy_resolver.h"
-
-#include <base/bind.h>
-#include <base/location.h>
-
-using brillo::MessageLoop;
-using std::deque;
-using std::string;
-
-namespace chromeos_update_engine {
-
-const char kNoProxy[] = "direct://";
-const ProxyRequestId kProxyRequestIdNull = brillo::MessageLoop::kTaskIdNull;
-
-DirectProxyResolver::~DirectProxyResolver() {
-  if (idle_callback_id_ != MessageLoop::kTaskIdNull) {
-    // The DirectProxyResolver is instantiated as part of the UpdateAttempter
-    // which is also instantiated by default by the FakeSystemState, even when
-    // it is not used. We check the manage_shares_id_ before calling the
-    // MessageLoop::current() since the unit test using a FakeSystemState may
-    // have not define a MessageLoop for the current thread.
-    MessageLoop::current()->CancelTask(idle_callback_id_);
-    idle_callback_id_ = MessageLoop::kTaskIdNull;
-  }
-}
-
-ProxyRequestId DirectProxyResolver::GetProxiesForUrl(
-    const string& url, const ProxiesResolvedFn& callback) {
-  idle_callback_id_ = MessageLoop::current()->PostTask(
-      FROM_HERE,
-      base::Bind(&DirectProxyResolver::ReturnCallback,
-                 base::Unretained(this),
-                 callback));
-  return idle_callback_id_;
-}
-
-bool DirectProxyResolver::CancelProxyRequest(ProxyRequestId request) {
-  return MessageLoop::current()->CancelTask(request);
-}
-
-void DirectProxyResolver::ReturnCallback(const ProxiesResolvedFn& callback) {
-  idle_callback_id_ = MessageLoop::kTaskIdNull;
-
-  // Initialize proxy pool with as many proxies as indicated (all identical).
-  deque<string> proxies(num_proxies_, kNoProxy);
-
-  callback.Run(proxies);
-}
-
-}  // namespace chromeos_update_engine
diff --git a/common/proxy_resolver.h b/common/proxy_resolver.h
deleted file mode 100644
index 9bd51fc..0000000
--- a/common/proxy_resolver.h
+++ /dev/null
@@ -1,98 +0,0 @@
-//
-// Copyright (C) 2010 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
-#define UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
-
-#include <deque>
-#include <string>
-
-#include <base/logging.h>
-#include <brillo/message_loops/message_loop.h>
-
-#include "update_engine/common/utils.h"
-
-namespace chromeos_update_engine {
-
-extern const char kNoProxy[];
-
-// Callback for a call to GetProxiesForUrl().
-// Resultant proxies are in |out_proxy|. Each will be in one of the
-// following forms:
-// http://<host>[:<port>] - HTTP proxy
-// socks{4,5}://<host>[:<port>] - SOCKS4/5 proxy
-// kNoProxy - no proxy
-typedef base::Callback<void(const std::deque<std::string>& proxies)>
-    ProxiesResolvedFn;
-
-// An id that identifies a proxy request. Used to cancel an ongoing request
-// before the callback is called.
-typedef brillo::MessageLoop::TaskId ProxyRequestId;
-
-// A constant identifying an invalid ProxyRequestId.
-extern const ProxyRequestId kProxyRequestIdNull;
-
-class ProxyResolver {
- public:
-  ProxyResolver() {}
-  virtual ~ProxyResolver() {}
-
-  // Finds proxies for the given URL and returns them via the callback.
-  // Returns the id of the pending request on success or kProxyRequestIdNull
-  // otherwise.
-  virtual ProxyRequestId GetProxiesForUrl(
-      const std::string& url, const ProxiesResolvedFn& callback) = 0;
-
-  // Cancel the proxy resolution request initiated by GetProxiesForUrl(). The
-  // |request| value must be the one provided by GetProxiesForUrl().
-  virtual bool CancelProxyRequest(ProxyRequestId request) = 0;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(ProxyResolver);
-};
-
-// Always says to not use a proxy
-class DirectProxyResolver : public ProxyResolver {
- public:
-  DirectProxyResolver() = default;
-  ~DirectProxyResolver() override;
-  ProxyRequestId GetProxiesForUrl(const std::string& url,
-                                  const ProxiesResolvedFn& callback) override;
-  bool CancelProxyRequest(ProxyRequestId request) override;
-
-  // Set the number of direct (non-) proxies to be returned by resolver.
-  // The default value is 1; higher numbers are currently used in testing.
-  inline void set_num_proxies(size_t num_proxies) {
-    num_proxies_ = num_proxies;
-  }
-
- private:
-  // The ID of the main loop callback.
-  brillo::MessageLoop::TaskId idle_callback_id_{
-      brillo::MessageLoop::kTaskIdNull};
-
-  // Number of direct proxies to return on resolved list; currently used for
-  // testing.
-  size_t num_proxies_{1};
-
-  // The MainLoop callback, from here we return to the client.
-  void ReturnCallback(const ProxiesResolvedFn& callback);
-  DISALLOW_COPY_AND_ASSIGN(DirectProxyResolver);
-};
-
-}  // namespace chromeos_update_engine
-
-#endif  // UPDATE_ENGINE_COMMON_PROXY_RESOLVER_H_
diff --git a/common/proxy_resolver_unittest.cc b/common/proxy_resolver_unittest.cc
deleted file mode 100644
index 101bf6b..0000000
--- a/common/proxy_resolver_unittest.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-//
-// Copyright (C) 2017 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#include "update_engine/common/proxy_resolver.h"
-
-#include <deque>
-#include <string>
-
-#include <gtest/gtest.h>
-
-#include <base/bind.h>
-#include <brillo/message_loops/fake_message_loop.h>
-
-using std::deque;
-using std::string;
-
-namespace chromeos_update_engine {
-
-class ProxyResolverTest : public ::testing::Test {
- protected:
-  virtual ~ProxyResolverTest() = default;
-
-  void SetUp() override { loop_.SetAsCurrent(); }
-
-  void TearDown() override { EXPECT_FALSE(loop_.PendingTasks()); }
-
-  brillo::FakeMessageLoop loop_{nullptr};
-  DirectProxyResolver resolver_;
-};
-
-TEST_F(ProxyResolverTest, DirectProxyResolverCallbackTest) {
-  bool called = false;
-  deque<string> callback_proxies;
-  auto callback = base::Bind(
-      [](bool* called,
-         deque<string>* callback_proxies,
-         const deque<string>& proxies) {
-        *called = true;
-        *callback_proxies = proxies;
-      },
-      &called,
-      &callback_proxies);
-
-  EXPECT_NE(kProxyRequestIdNull,
-            resolver_.GetProxiesForUrl("http://foo", callback));
-  // Check the callback is not called until the message loop runs.
-  EXPECT_FALSE(called);
-  loop_.Run();
-  EXPECT_TRUE(called);
-  EXPECT_EQ(kNoProxy, callback_proxies.front());
-}
-
-TEST_F(ProxyResolverTest, DirectProxyResolverCancelCallbackTest) {
-  bool called = false;
-  auto callback = base::Bind(
-      [](bool* called, const deque<string>& proxies) { *called = true; },
-      &called);
-
-  ProxyRequestId request = resolver_.GetProxiesForUrl("http://foo", callback);
-  EXPECT_FALSE(called);
-  EXPECT_TRUE(resolver_.CancelProxyRequest(request));
-  loop_.Run();
-  EXPECT_FALSE(called);
-}
-
-TEST_F(ProxyResolverTest, DirectProxyResolverSimultaneousCallbacksTest) {
-  int called = 0;
-  auto callback = base::Bind(
-      [](int* called, const deque<string>& proxies) { (*called)++; }, &called);
-
-  resolver_.GetProxiesForUrl("http://foo", callback);
-  resolver_.GetProxiesForUrl("http://bar", callback);
-  EXPECT_EQ(0, called);
-  loop_.Run();
-  EXPECT_EQ(2, called);
-}
-
-}  // namespace chromeos_update_engine
diff --git a/common/utils.cc b/common/utils.cc
index 0b76eea..4c1365a 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -35,6 +35,7 @@
 #include <unistd.h>
 
 #include <algorithm>
+#include <filesystem>
 #include <utility>
 #include <vector>
 
@@ -55,9 +56,8 @@
 #include <brillo/data_encoding.h>
 
 #include "update_engine/common/constants.h"
-#include "update_engine/common/platform_constants.h"
-#include "update_engine/common/prefs_interface.h"
 #include "update_engine/common/subprocess.h"
+#include "update_engine/common/platform_constants.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
 
 using base::Time;
@@ -359,7 +359,7 @@
 }
 
 off_t BlockDevSize(int fd) {
-  uint64_t dev_size;
+  uint64_t dev_size{};
   int rc = ioctl(fd, BLKGETSIZE64, &dev_size);
   if (rc == -1) {
     dev_size = -1;
@@ -369,7 +369,7 @@
 }
 
 off_t FileSize(int fd) {
-  struct stat stbuf;
+  struct stat stbuf {};
   int rc = fstat(fd, &stbuf);
   CHECK_EQ(rc, 0);
   if (rc < 0) {
@@ -410,6 +410,53 @@
   return true;
 }
 
+bool FsyncDirectory(const char* dirname) {
+  android::base::unique_fd fd(
+      TEMP_FAILURE_RETRY(open(dirname, O_RDONLY | O_CLOEXEC)));
+  if (fd == -1) {
+    PLOG(ERROR) << "Failed to open " << dirname;
+    return false;
+  }
+  if (fsync(fd) == -1) {
+    if (errno == EROFS || errno == EINVAL) {
+      PLOG(WARNING) << "Skip fsync " << dirname
+                    << " on a file system does not support synchronization";
+    } else {
+      PLOG(ERROR) << "Failed to fsync " << dirname;
+      return false;
+    }
+  }
+  return true;
+}
+
+bool WriteStringToFileAtomic(const std::string& path,
+                             std::string_view content) {
+  const std::string tmp_path = path + ".tmp";
+  {
+    const int flags = O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC;
+    android::base::unique_fd fd(
+        TEMP_FAILURE_RETRY(open(tmp_path.c_str(), flags, 0644)));
+    if (fd == -1) {
+      PLOG(ERROR) << "Failed to open " << path;
+      return false;
+    }
+    if (!WriteAll(fd.get(), content.data(), content.size())) {
+      PLOG(ERROR) << "Failed to write to fd " << fd;
+      return false;
+    }
+    // rename() without fsync() is not safe: the data could still be sitting
+    // in the page cache. To ensure atomicity, fsync() the temp file first.
+    if (fsync(fd) != 0) {
+      PLOG(ERROR) << "Failed to fsync " << tmp_path;
+      return false;
+    }
+  }
+  if (rename(tmp_path.c_str(), path.c_str()) == -1) {
+    PLOG(ERROR) << "rename failed from " << tmp_path << " to " << path;
+    return false;
+  }
+  return FsyncDirectory(std::filesystem::path(path).parent_path().c_str());
+}
+
 void HexDumpArray(const uint8_t* const arr, const size_t length) {
   LOG(INFO) << "Logging array of length: " << length;
   const unsigned int bytes_per_line = 16;
@@ -491,17 +538,17 @@
 }
 
 bool FileExists(const char* path) {
-  struct stat stbuf;
+  struct stat stbuf {};
   return 0 == lstat(path, &stbuf);
 }
 
 bool IsSymlink(const char* path) {
-  struct stat stbuf;
+  struct stat stbuf {};
   return lstat(path, &stbuf) == 0 && S_ISLNK(stbuf.st_mode) != 0;
 }
 
 bool IsRegFile(const char* path) {
-  struct stat stbuf;
+  struct stat stbuf {};
   return lstat(path, &stbuf) == 0 && S_ISREG(stbuf.st_mode) != 0;
 }
 
@@ -539,7 +586,7 @@
   }
   ScopedFdCloser fd_closer(&fd);
   // We take no action if not needed.
-  int read_only_flag;
+  int read_only_flag{};
   int expected_flag = read_only ? 1 : 0;
   int rc = ioctl(fd, BLKROGET, &read_only_flag);
   // In case of failure reading the setting we will try to set it anyway.
@@ -607,7 +654,8 @@
 }
 
 bool IsMountpoint(const std::string& mountpoint) {
-  struct stat stdir, stparent;
+  struct stat stdir{}, stparent{};
 
   // Check whether the passed mountpoint is a directory and the /.. is in the
   // same device or not. If mountpoint/.. is in a different device it means that
@@ -678,7 +726,7 @@
   // and size is the same for both 32 and 64 bits.
   if (size < offsetof(Elf32_Ehdr, e_machine) + sizeof(hdr->e_machine))
     return true;
-  uint16_t e_machine;
+  uint16_t e_machine{};
   // Fix endianness regardless of the host endianness.
   if (ei_data == ELFDATA2LSB)
     e_machine = le16toh(hdr->e_machine);
@@ -766,7 +814,7 @@
 }
 
 string ToString(const Time utc_time) {
-  Time::Exploded exp_time;
+  Time::Exploded exp_time{};
   utc_time.UTCExplode(&exp_time);
   return base::StringPrintf("%d/%d/%d %d:%02d:%02d GMT",
                             exp_time.month,
@@ -1004,7 +1052,7 @@
   }
   vector<string> tokens = base::SplitString(
       version, ".", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
-  int value;
+  int value{};
   if (tokens.empty() || !base::StringToInt(tokens[0], &value))
     return -1;  // Target version is invalid.
   return value;
@@ -1025,8 +1073,8 @@
     return;
   }
 
-  int high;
-  int low;
+  int high{};
+  int low{};
   if (!(base::StringToInt(parts[0], &high) &&
         base::StringToInt(parts[1], &low))) {
     // Both parts of the version could not be parsed correctly.
@@ -1051,7 +1099,7 @@
 }
 
 string GetTimeAsString(time_t utime) {
-  struct tm tm;
+  struct tm tm {};
   CHECK_EQ(localtime_r(&utime, &tm), &tm);
   char str[16];
   CHECK_EQ(strftime(str, sizeof(str), "%Y%m%d-%H%M%S", &tm), 15u);
diff --git a/common/utils.h b/common/utils.h
index 201e47e..0087794 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -162,6 +162,9 @@
 
 bool SendFile(int out_fd, int in_fd, size_t count);
 
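+// fsync()s |dirname| so that directory operations (e.g. a rename() into it)
+// are persisted; failure is tolerated on filesystems that do not support
+// fsync() of directories.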
+bool FsyncDirectory(const char* dirname);
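+// Atomically replaces |path| with |content|: writes to "|path|.tmp" and
+// fsync()s it, rename()s it over |path|, then fsync()s the parent directory.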
+bool WriteStringToFileAtomic(const std::string& path, std::string_view content);
+
 // Returns true if the file exists for sure. Returns false if it doesn't exist,
 // or an error occurs.
 bool FileExists(const char* path);
diff --git a/download_action.cc b/download_action.cc
index 0358569..c0463de 100644
--- a/download_action.cc
+++ b/download_action.cc
@@ -25,7 +25,6 @@
 #include <base/metrics/statistics_recorder.h>
 #include <base/strings/stringprintf.h>
 
-#include "update_engine/common/action_pipe.h"
 #include "update_engine/common/boot_control_interface.h"
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/multi_range_http_fetcher.h"
@@ -106,7 +105,7 @@
     return false;
   }
 
-  ErrorCode error;
+  ErrorCode error{};
   const bool success =
       delta_performer_->Write(
           cached_manifest_bytes.data(), cached_manifest_bytes.size(), &error) &&
diff --git a/download_action_android_unittest.cc b/download_action_android_unittest.cc
index bef4342..968f875 100644
--- a/download_action_android_unittest.cc
+++ b/download_action_android_unittest.cc
@@ -37,7 +37,6 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/payload_generator/annotated_operation.h"
 #include "update_engine/payload_generator/payload_file.h"
 #include "update_engine/payload_generator/payload_signer.h"
 
@@ -70,8 +69,7 @@
       .WillRepeatedly(DoAll(SetArgPointee<1>(data), Return(true)));
 
   BootControlStub boot_control;
-  MockHttpFetcher* http_fetcher =
-      new MockHttpFetcher(data.data(), data.size(), nullptr);
+  MockHttpFetcher* http_fetcher = new MockHttpFetcher(data.data(), data.size());
   http_fetcher->set_delay(false);
   InstallPlan install_plan;
   auto& payload = install_plan.payloads.emplace_back();
@@ -139,8 +137,7 @@
       .WillRepeatedly(DoAll(SetArgPointee<1>(0), Return(true)));
 
   BootControlStub boot_control;
-  MockHttpFetcher* http_fetcher =
-      new MockHttpFetcher(data.data(), data.size(), nullptr);
+  MockHttpFetcher* http_fetcher = new MockHttpFetcher(data.data(), data.size());
   http_fetcher->set_delay(false);
   InstallPlan install_plan;
   auto& payload = install_plan.payloads.emplace_back();
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index 1599aac..b8d11f5 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -41,6 +41,7 @@
 #include "update_engine/certificate_checker.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/platform_constants.h"
+#include "update_engine/common/utils.h"
 
 using base::TimeDelta;
 using brillo::MessageLoop;
@@ -92,9 +93,8 @@
   return 1;
 }
 
-LibcurlHttpFetcher::LibcurlHttpFetcher(ProxyResolver* proxy_resolver,
-                                       HardwareInterface* hardware)
-    : HttpFetcher(proxy_resolver), hardware_(hardware) {
+LibcurlHttpFetcher::LibcurlHttpFetcher(HardwareInterface* hardware)
+    : hardware_(hardware) {
   // Dev users want a longer timeout (180 seconds) because they may
   // be waiting on the dev server to build an image.
   if (!hardware_->IsOfficialBuild())
@@ -106,7 +106,6 @@
 LibcurlHttpFetcher::~LibcurlHttpFetcher() {
   LOG_IF(ERROR, transfer_in_progress_)
       << "Destroying the fetcher while a transfer is in progress.";
-  CancelProxyResolution();
   CleanUp();
 }
 
@@ -167,7 +166,7 @@
                  curl_handle_, CURLOPT_PROXY, GetCurrentProxy().c_str()),
              CURLE_OK);
     // Curl seems to require us to set the protocol
-    curl_proxytype type;
+    curl_proxytype type{};
     if (GetProxyType(GetCurrentProxy(), &type)) {
       CHECK_EQ(curl_easy_setopt(curl_handle_, CURLOPT_PROXYTYPE, type),
                CURLE_OK);
@@ -338,12 +337,7 @@
 void LibcurlHttpFetcher::BeginTransfer(const string& url) {
   CHECK(!transfer_in_progress_);
   url_ = url;
-  auto closure =
-      base::Bind(&LibcurlHttpFetcher::ProxiesResolved, base::Unretained(this));
-  ResolveProxiesForUrl(url_, closure);
-}
 
-void LibcurlHttpFetcher::ProxiesResolved() {
   transfer_size_ = -1;
   resume_offset_ = 0;
   retry_count_ = 0;
@@ -362,7 +356,6 @@
 }
 
 void LibcurlHttpFetcher::ForceTransferTermination() {
-  CancelProxyResolution();
   CleanUp();
   if (delegate_) {
     // Note that after the callback returns this object may be destroyed.
@@ -604,7 +597,7 @@
 
   sent_byte_ = true;
   {
-    double transfer_size_double;
+    double transfer_size_double{};
     CHECK_EQ(curl_easy_getinfo(curl_handle_,
                                CURLINFO_CONTENT_LENGTH_DOWNLOAD,
                                &transfer_size_double),
@@ -834,7 +827,7 @@
     // Repeated calls to |curl_multi_info_read| will return a new struct each
     // time, until a NULL is returned as a signal that there is no more to get
     // at this point.
-    int msgs_in_queue;
+    int msgs_in_queue{};
     CURLMsg* curl_msg =
         curl_multi_info_read(curl_multi_handle_, &msgs_in_queue);
     if (curl_msg == nullptr)
diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h
index 4e91b69..0e34f9d 100644
--- a/libcurl_http_fetcher.h
+++ b/libcurl_http_fetcher.h
@@ -82,8 +82,7 @@
 
 class LibcurlHttpFetcher : public HttpFetcher {
  public:
-  LibcurlHttpFetcher(ProxyResolver* proxy_resolver,
-                     HardwareInterface* hardware);
+  explicit LibcurlHttpFetcher(HardwareInterface* hardware);
 
   // Cleans up all internal state. Does not notify delegate
   ~LibcurlHttpFetcher() override;
@@ -165,10 +164,6 @@
   // closing a socket created with the CURLOPT_OPENSOCKETFUNCTION callback.
   static int LibcurlCloseSocketCallback(void* clientp, curl_socket_t item);
 
-  // Callback for when proxy resolution has completed. This begins the
-  // transfer.
-  void ProxiesResolved();
-
   // Asks libcurl for the http response code and stores it in the object.
   virtual void GetHttpResponseCode();
 
diff --git a/libcurl_http_fetcher_unittest.cc b/libcurl_http_fetcher_unittest.cc
index 3543870..a944f37 100644
--- a/libcurl_http_fetcher_unittest.cc
+++ b/libcurl_http_fetcher_unittest.cc
@@ -23,7 +23,6 @@
 #include <gtest/gtest.h>
 
 #include "update_engine/common/fake_hardware.h"
-#include "update_engine/common/mock_proxy_resolver.h"
 #include "update_engine/mock_libcurl_http_fetcher.h"
 
 using std::string;
@@ -44,7 +43,7 @@
 
   brillo::FakeMessageLoop loop_{nullptr};
   FakeHardware fake_hardware_;
-  MockLibcurlHttpFetcher libcurl_fetcher_{nullptr, &fake_hardware_};
+  MockLibcurlHttpFetcher libcurl_fetcher_{&fake_hardware_};
   UnresolvedHostStateMachine state_machine_;
 };
 
diff --git a/liburing_cpp/.gitignore b/liburing_cpp/.gitignore
new file mode 100644
index 0000000..e52e042
--- /dev/null
+++ b/liburing_cpp/.gitignore
@@ -0,0 +1,6 @@
+build
+.xmake
+.cache
+.vscode
+compile_commands.json
+
diff --git a/liburing_cpp/Android.bp b/liburing_cpp/Android.bp
new file mode 100644
index 0000000..0daa48d
--- /dev/null
+++ b/liburing_cpp/Android.bp
@@ -0,0 +1,40 @@
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "system_update_engine_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["system_update_engine_license"],
+}
+
+cc_library {
+    name: "liburing_cpp",
+    host_supported: true,
+    recovery_available: true,
+    srcs: [
+        "src/IoUring.cpp",
+        "src/IoUringSQE.cpp",
+    ],
+    static_libs: [
+        "liburing",
+    ],
+    include_dirs: ["bionic/libc/kernel"],
+    export_include_dirs: [
+        "include",
+    ],
+}
+
+cc_test_host {
+    name: "liburing_cpp_tests",
+    srcs: [
+        "tests/BasicTests.cpp",
+        "tests/main.cpp",
+    ],
+    static_libs: [
+        "libgtest",
+        "liburing",
+        "liburing_cpp",
+    ],
+}
diff --git a/liburing_cpp/README.md b/liburing_cpp/README.md
new file mode 100644
index 0000000..1efabae
--- /dev/null
+++ b/liburing_cpp/README.md
@@ -0,0 +1,26 @@
+# liburing_cpp
+
+This project provides an idiomatic C++ wrapper for liburing.
+
+## Source Code Headers
+
+Every file containing source code must include copyright and license
+information. This includes any JS/CSS files that you might be serving out to
+browsers. (This is to help well-intentioned people avoid accidental copying that
+doesn't comply with the license.)
+
+Apache header:
+
+    Copyright 2022 Google LLC
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        https://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
diff --git a/liburing_cpp/include/liburing_cpp/BufferView.h b/liburing_cpp/include/liburing_cpp/BufferView.h
new file mode 100644
index 0000000..eff93df
--- /dev/null
+++ b/liburing_cpp/include/liburing_cpp/BufferView.h
@@ -0,0 +1,50 @@
+//
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef __BUFFER_VIEW_CPP_H
+#define __BUFFER_VIEW_CPP_H
+
+#include <assert.h>
+#include <stddef.h>
+
+// Non-owning reference to a contiguous memory region. Similar to
+// std::string_view, but allows modifying the underlying memory.
+template <typename T>
+struct BufferView {
+  constexpr BufferView() = default;
+  constexpr BufferView(T* data, size_t size) : ptr_(data), size_(size) {}
+
+  T* data() { return ptr_; }
+  size_t size() const { return size_; }
+  T* begin() { return ptr_; }
+  T* end() { return ptr_ + size_; }
+  bool empty() const { return size_ == 0; }
+
+  T& operator[](const size_t idx) {
+    assert(idx < size_);
+    return ptr_[idx];
+  }
+  const T& operator[](const size_t idx) const {
+    assert(idx < size_);
+    return ptr_[idx];
+  }
+
+ private:
+  T* ptr_{};
+  size_t size_{};
+};
+
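+// Usage sketch:
+//   int raw[3] = {1, 2, 3};
+//   BufferView<int> view(raw, 3);
+//   view[0] = 42;  // writes through to raw[0]
+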
+#endif
diff --git a/liburing_cpp/include/liburing_cpp/IoUring.h b/liburing_cpp/include/liburing_cpp/IoUring.h
new file mode 100644
index 0000000..09ed5cc
--- /dev/null
+++ b/liburing_cpp/include/liburing_cpp/IoUring.h
@@ -0,0 +1,86 @@
+//
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef __IO_URING_CPP_H
+#define __IO_URING_CPP_H
+
+#include <errno.h>
+#include <string.h>
+
+#include <sys/uio.h>
+
+#include <algorithm>
+#include <memory>
+#include <optional>
+#include <utility>
+#include <variant>
+#include <vector>
+
+#include "IoUringCQE.h"
+#include "IoUringSQE.h"
+
+namespace io_uring_cpp {
+
+template <typename Err, typename Res>
+struct [[nodiscard]] Result : public std::variant<Err, Res> {
+  constexpr bool IsOk() const { return std::holds_alternative<Res>(*this); }
+  constexpr bool IsErr() const { return std::holds_alternative<Err>(*this); }
+  constexpr Err GetError() const { return std::get<Err>(*this); }
+  constexpr const Res& GetResult() const& { return std::get<Res>(*this); }
+  constexpr Res&& GetResult() && { return std::get<Res>(std::move(*this)); }
+};
+
+class IoUringInterface {
+ public:
+  virtual ~IoUringInterface() {}
+  // Registration helpers
+  // Register a fixed set of buffers to kernel.
+  virtual Errno RegisterBuffers(const struct iovec* iovecs,
+                                size_t iovec_size) = 0;
+  virtual Errno UnregisterBuffers() = 0;
+
+  // Register a set of file descriptors to kernel.
+  virtual Errno RegisterFiles(const int* files, size_t files_size) = 0;
+  virtual Errno UnregisterFiles() = 0;
+  // Append a submission entry into this io_uring. This does not submit the
+  // operation to the kernel. For that, call |IoUringInterface::Submit()|
+  virtual IoUringSQE PrepRead(int fd, void *buf, unsigned nbytes,
+                              uint64_t offset) = 0;
+  // Caller is responsible for making sure the input memory is available until
+  // this write operation completes.
+  virtual IoUringSQE PrepWrite(int fd, const void *buf, unsigned nbytes,
+                               uint64_t offset) = 0;
+
+  // Return number of SQEs available in the queue. If this is 0, subsequent
+  // calls to Prep*() functions will fail.
+  virtual size_t SQELeft() const = 0;
+  // Return number of SQEs currently in the queue. SQEs already submitted would
+  // not be counted.
+  virtual size_t SQEReady() const = 0;
+
+  // Ring operations
+  virtual IoUringSubmitResult Submit() = 0;
+  // Submit and block until |completions| number of CQEs are available
+  virtual IoUringSubmitResult SubmitAndWait(size_t completions) = 0;
+  virtual Result<Errno, IoUringCQE> PopCQE() = 0;
+  virtual Result<Errno, std::vector<IoUringCQE>> PopCQE(unsigned int count) = 0;
+  virtual Result<Errno, IoUringCQE> PeekCQE() = 0;
+
+  static std::unique_ptr<IoUringInterface> CreateLinuxIoUring(int queue_depth,
+                                                              int flags);
+};
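+
+// Typical flow (sketch; |fd|, |buf|, and |len| supplied by the caller):
+//   auto ring = IoUringInterface::CreateLinuxIoUring(64, 0);
+//   auto sqe = ring->PrepWrite(fd, buf, len, /*offset=*/0);
+//   if (sqe.IsOk() && ring->Submit().IsOk()) {
+//     auto cqe = ring->PopCQE();  // blocks; GetResult().res < 0 means -errno
+//   }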
+
+}  // namespace io_uring_cpp
+
+#endif
diff --git a/liburing_cpp/include/liburing_cpp/IoUringCQE.h b/liburing_cpp/include/liburing_cpp/IoUringCQE.h
new file mode 100644
index 0000000..e75731e
--- /dev/null
+++ b/liburing_cpp/include/liburing_cpp/IoUringCQE.h
@@ -0,0 +1,49 @@
+//
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef __IO_URING_CQE_CPP_H
+#define __IO_URING_CQE_CPP_H
+
+#include <stdint.h>
+
+#include <type_traits>
+
+namespace io_uring_cpp {
+
+struct IoUringCQE {
+  int32_t res;
+  uint32_t flags;
+  template <typename T>
+  T GetData() const {
+    static_assert(
+        std::is_trivially_copy_constructible_v<T>,
+        "Only trivially copiable types can be passed for io_uring data");
+    static_assert(sizeof(T) <= 8,
+                  "io_uring SQE's data field has size of 8 bytes, can't pass "
+                  "data larger than that.");
+    return *reinterpret_cast<const T*>(&userdata);
+  }
+
+  constexpr IoUringCQE(int32_t res, uint32_t flags, uint64_t userdata)
+      : res(res), flags(flags), userdata(userdata) {}
+
+ private:
+  uint64_t userdata;
+};
+
+}  // namespace io_uring_cpp
+
+#endif
diff --git a/liburing_cpp/include/liburing_cpp/IoUringSQE.h b/liburing_cpp/include/liburing_cpp/IoUringSQE.h
new file mode 100644
index 0000000..ca2b365
--- /dev/null
+++ b/liburing_cpp/include/liburing_cpp/IoUringSQE.h
@@ -0,0 +1,79 @@
+//
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include <errno.h>
+#include <string.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <iostream>
+#include <numeric>
+#include <type_traits>
+#include <vector>
+
+#ifndef __IO_URING_SQE_CPP_H
+#define __IO_URING_SQE_CPP_H
+
+namespace io_uring_cpp {
+
+struct [[nodiscard]] IoUringSubmitResult {
+  constexpr bool IsOk() const { return ret > 0; }
+  const char *ErrMsg() const {
+    if (IsOk()) {
+      return nullptr;
+    }
+    return strerror(-ret);
+  }
+  constexpr auto ErrCode() const { return std::min(ret, 0); }
+  constexpr auto EntriesSubmitted() const { return std::max(ret, 0); }
+
+  int ret;
+};
+
+struct [[nodiscard]] Errno {
+  constexpr Errno(int ret) : error_code(ret) {
+    error_code = std::abs(error_code);
+  }
+  int error_code;
+  constexpr int ErrCode() { return error_code; }
+  const char *ErrMsg();
+  constexpr bool IsOk() const { return error_code == 0; }
+};
+
+std::ostream &operator<<(std::ostream &, Errno err);
+
+struct [[nodiscard]] IoUringSQE {
+  constexpr IoUringSQE(void *p) : sqe(p) {}
+  IoUringSQE &SetFlags(unsigned int flags);
+  template <typename T>
+  IoUringSQE &SetData(const T &data) {
+    static_assert(
+        std::is_trivially_copy_constructible_v<T>,
+        "Only trivially copiable types can be passed for io_uring data");
+    static_assert(sizeof(T) <= 8,
+                  "io_uring SQE's data field has size of 8 bytes, can't pass "
+                  "data larger than that.");
+    // memcpy into a zero-initialized uint64_t so types smaller than 8 bytes
+    // don't trigger an out-of-bounds read.
+    uint64_t value = 0;
+    memcpy(&value, &data, sizeof(T));
+    return SetData(value);
+  }
+  IoUringSQE& SetData(uint64_t data);
+
+  constexpr bool IsOk() const { return sqe != nullptr; }
+
+ private:
+  void *sqe;
+};
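+
+// User-data round trip (sketch; |MyTag| is a hypothetical caller type):
+//   MyTag* tag = ...;                   // trivially copyable, <= 8 bytes
+//   sqe.SetData(tag);
+//   // ... Submit(), then after PopCQE():
+//   auto* done = cqe.GetData<MyTag*>();  // GetData lives on IoUringCQE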
+
+}  // namespace io_uring_cpp
+
+#endif
diff --git a/liburing_cpp/src/IoUring.cpp b/liburing_cpp/src/IoUring.cpp
new file mode 100644
index 0000000..f561d25
--- /dev/null
+++ b/liburing_cpp/src/IoUring.cpp
@@ -0,0 +1,191 @@
+//
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <asm-generic/errno-base.h>
+#include <liburing_cpp/IoUring.h>
+#include <string.h>
+
+#include <algorithm>
+#include <atomic>
+#include <iostream>
+#include <memory>
+
+#include "liburing.h"
+#include "liburing_cpp/IoUringCQE.h"
+
+namespace io_uring_cpp {
+
+template <typename T>
+bool IsZeroInitialized(const T& val) {
+  auto begin = reinterpret_cast<const char*>(&val);
+  auto end = begin + sizeof(val);
+  return std::all_of(begin, end, [](const auto& a) { return a == 0; });
+}
+
+class IoUring final : public IoUringInterface {
+ public:
+  ~IoUring() override {
+    if (!IsZeroInitialized(ring)) {
+      if (buffer_registered_) {
+        UnregisterBuffers();
+      }
+      if (files_registered_) {
+        UnregisterFiles();
+      }
+      io_uring_queue_exit(&ring);
+    }
+  }
+  IoUring(const IoUring&) = delete;
+  IoUring(IoUring&& rhs) {
+    ring = rhs.ring;
+    memset(&rhs.ring, 0, sizeof(rhs.ring));
+  }
+  IoUring& operator=(IoUring&& rhs) {
+    std::swap(ring, rhs.ring);
+    return *this;
+  }
+  Errno RegisterBuffers(const struct iovec* iovecs,
+                        size_t iovec_size) override {
+    const auto ret =
+        Errno(io_uring_register_buffers(&ring, iovecs, iovec_size));
+    buffer_registered_ = ret.IsOk();
+    return ret;
+  }
+
+  Errno UnregisterBuffers() override {
+    const auto ret = Errno(io_uring_unregister_buffers(&ring));
+    buffer_registered_ = !ret.IsOk();
+    return ret;
+  }
+
+  Errno RegisterFiles(const int* files, size_t files_size) override {
+    const auto ret = Errno(io_uring_register_files(&ring, files, files_size));
+    files_registered_ = ret.IsOk();
+    return ret;
+  }
+
+  Errno UnregisterFiles() override {
+    const auto ret = Errno(io_uring_unregister_files(&ring));
+    files_registered_ = !ret.IsOk();
+    return ret;
+  }
+
+  IoUringSQE PrepRead(int fd, void* buf, unsigned nbytes,
+                      uint64_t offset) override {
+    auto sqe = io_uring_get_sqe(&ring);
+    if (sqe == nullptr) {
+      return IoUringSQE{nullptr};
+    }
+    io_uring_prep_read(sqe, fd, buf, nbytes, offset);
+    return IoUringSQE{static_cast<void*>(sqe)};
+  }
+  IoUringSQE PrepWrite(int fd, const void* buf, unsigned nbytes,
+                       uint64_t offset) override {
+    auto sqe = io_uring_get_sqe(&ring);
+    if (sqe == nullptr) {
+      return IoUringSQE{nullptr};
+    }
+    io_uring_prep_write(sqe, fd, buf, nbytes, offset);
+    return IoUringSQE{static_cast<void*>(sqe)};
+  }
+
+  size_t SQELeft() const override { return io_uring_sq_space_left(&ring); }
+  size_t SQEReady() const override { return io_uring_sq_ready(&ring); }
+
+  IoUringSubmitResult Submit() override {
+    return IoUringSubmitResult{io_uring_submit(&ring)};
+  }
+
+  IoUringSubmitResult SubmitAndWait(size_t completions) override {
+    return IoUringSubmitResult{io_uring_submit_and_wait(&ring, completions)};
+  }
+
+  Result<Errno, std::vector<IoUringCQE>> PopCQE(
+      const unsigned int count) override {
+    std::vector<io_uring_cqe*> cqe_ptrs;
+    cqe_ptrs.resize(count);
+    const auto ret = io_uring_wait_cqe_nr(&ring, cqe_ptrs.data(), count);
+    if (ret != 0) {
+      return {Errno(ret)};
+    }
+    const auto filled = io_uring_peek_batch_cqe(&ring, cqe_ptrs.data(), count);
+    if (filled != count) {
+      return {Errno(EAGAIN)};
+    }
+    std::vector<IoUringCQE> cqes;
+    cqes.reserve(count);
+    for (const auto& cqe : cqe_ptrs) {
+      if (cqe == nullptr) {
+        return {Errno(EAGAIN)};
+      }
+      cqes.push_back(IoUringCQE(cqe->res, cqe->flags, cqe->user_data));
+      io_uring_cqe_seen(&ring, cqe);
+    }
+    return {cqes};
+  }
+
+  Result<Errno, IoUringCQE> PopCQE() override {
+    struct io_uring_cqe* ptr{};
+    const auto ret = io_uring_wait_cqe(&ring, &ptr);
+    if (ret != 0) {
+      return {Errno(ret)};
+    }
+    const auto cqe = IoUringCQE(ptr->res, ptr->flags, ptr->user_data);
+    io_uring_cqe_seen(&ring, ptr);
+    return {cqe};
+  }
+
+  Result<Errno, IoUringCQE> PeekCQE() override {
+    struct io_uring_cqe* ptr{};
+    const auto ret = io_uring_peek_cqe(&ring, &ptr);
+    if (ret != 0) {
+      return {Errno(ret)};
+    }
+    return {IoUringCQE(ptr->res, ptr->flags, ptr->user_data)};
+  }
+
+  IoUring(struct io_uring r) : ring(r) {}
+
+ private:
+  struct io_uring ring {};
+  bool buffer_registered_ = false;
+  bool files_registered_ = false;
+  std::atomic<size_t> request_id_{};
+};
+
+const char* Errno::ErrMsg() {
+  if (error_code == 0) {
+    return nullptr;
+  }
+  return strerror(error_code);
+}
+
+std::ostream& operator<<(std::ostream& out, Errno err) {
+  out << err.ErrCode() << ", " << err.ErrMsg();
+  return out;
+}
+
+std::unique_ptr<IoUringInterface> IoUringInterface::CreateLinuxIoUring(
+    int queue_depth, int flags) {
+  struct io_uring ring {};
+  const auto err = io_uring_queue_init(queue_depth, &ring, flags);
+  if (err) {
+    errno = -err;
+    return {};
+  }
+  return std::unique_ptr<IoUringInterface>(new IoUring(ring));
+}
+
+}  // namespace io_uring_cpp
diff --git a/liburing_cpp/src/IoUringSQE.cpp b/liburing_cpp/src/IoUringSQE.cpp
new file mode 100644
index 0000000..fc3d3ea
--- /dev/null
+++ b/liburing_cpp/src/IoUringSQE.cpp
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <liburing_cpp/IoUringSQE.h>
+
+#include <cstdint>
+
+#include "liburing.h"
+
+namespace io_uring_cpp {
+
+IoUringSQE &IoUringSQE::SetFlags(unsigned int flags) {
+  if (IsOk()) {
+    ::io_uring_sqe_set_flags(static_cast<struct io_uring_sqe *>(sqe), flags);
+  }
+  return *this;
+}
+
+IoUringSQE &IoUringSQE::SetData(uint64_t data) {
+  if (IsOk()) {
+    ::io_uring_sqe_set_data(static_cast<struct io_uring_sqe *>(sqe),
+                            reinterpret_cast<void *>(data));
+  }
+  return *this;
+}
+
+}  // namespace io_uring_cpp
diff --git a/liburing_cpp/tests/BasicTests.cpp b/liburing_cpp/tests/BasicTests.cpp
new file mode 100644
index 0000000..81288f6
--- /dev/null
+++ b/liburing_cpp/tests/BasicTests.cpp
@@ -0,0 +1,194 @@
+//
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <gtest/gtest.h>
+#include <liburing_cpp/IoUring.h>
+
+#include <linux/fs.h>
+#include <stdio.h>
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <array>
+#include <cstring>
+#include <vector>
+
+using namespace io_uring_cpp;
+
+class IoUringTest : public ::testing::Test {
+ public:
+  IoUringTest() { fp = tmpfile(); }
+  ~IoUringTest() {
+    if (fp) {
+      fclose(fp);
+    }
+  }
+  void SetUp() override {
+    struct utsname buffer {};
+
+    ASSERT_EQ(uname(&buffer), 0)
+        << strerror(errno) << "Failed to get kernel version number";
+    int major = 0;
+    int minor = 0;
+    const auto matched = sscanf(buffer.release, "%d.%d", &major, &minor);
+    ASSERT_EQ(matched, 2) << "Unexpected kernel version format: "
+                          << buffer.release;
+
+    if (major < 5 || (major == 5 && minor < 6)) {
+      GTEST_SKIP() << "Kernel version does not support io_uring "
+                   << buffer.release;
+      return;
+    }
+
+    ring = IoUringInterface::CreateLinuxIoUring(4096, 0);
+    ASSERT_NE(ring, nullptr);
+  }
+  void Write(int fd, const void* data, const size_t len) {
+    const auto buf = static_cast<const char*>(data);
+    constexpr size_t IO_BATCH_SIZE = 4096;
+    size_t i = 0;
+    for (i = 0; i < len; i += IO_BATCH_SIZE) {
+      // Clamp the final chunk so a length that isn't a multiple of
+      // IO_BATCH_SIZE never reads past the end of |data|.
+      const auto chunk = std::min(IO_BATCH_SIZE, len - i);
+      const auto sqe = ring->PrepWrite(fd, buf + i, chunk, i);
+      ASSERT_TRUE(sqe.IsOk());
+    }
+    const auto ret = ring->Submit();
+    ASSERT_TRUE(ret.IsOk()) << ret.ErrMsg();
+    for (size_t i = (len + IO_BATCH_SIZE - 1) / IO_BATCH_SIZE; i > 0; i--) {
+      const auto cqe = ring->PopCQE();
+      ASSERT_TRUE(cqe.IsOk());
+      ASSERT_GT(cqe.GetResult().res, 0);
+    }
+  }
+  std::unique_ptr<IoUringInterface> ring;
+  FILE* fp = nullptr;
+};
+
+TEST_F(IoUringTest, SmallRead) {
+  int fd = open("/proc/self/maps", O_RDONLY);
+  std::array<char, 1024> buf{};
+  const auto sqe = ring->PrepRead(fd, buf.data(), buf.size(), 0);
+  ASSERT_TRUE(sqe.IsOk()) << "Submission Queue is full!";
+  const auto ret = ring->Submit();
+  ASSERT_TRUE(ret.IsOk()) << ret.ErrMsg();
+  const auto cqe = ring->PopCQE();
+  ASSERT_TRUE(cqe.IsOk()) << cqe.GetError();
+  ASSERT_GT(cqe.GetResult().res, 0);
+}
+
+TEST_F(IoUringTest, SmallWrite) {
+  auto fp = tmpfile();
+  int fd = fileno(fp);
+  std::string buffer(256, 'A');
+  const auto sqe = ring->PrepWrite(fd, buffer.data(), buffer.size(), 0);
+  ASSERT_TRUE(sqe.IsOk()) << "Submission Queue is full!";
+  const auto ret = ring->Submit();
+  ASSERT_TRUE(ret.IsOk()) << ret.ErrMsg();
+  const auto cqe = ring->PopCQE();
+  ASSERT_TRUE(cqe.IsOk()) << cqe.GetError();
+
+  const auto bytes_read = pread(fd, buffer.data(), buffer.size(), 0);
+
+  ASSERT_EQ(bytes_read, buffer.size());
+
+  ASSERT_TRUE(std::all_of(buffer.begin(), buffer.end(), [](const auto& a) {
+    return a == 'A';
+  })) << buffer;
+  fclose(fp);
+}
+
+TEST_F(IoUringTest, ChunkedWrite) {
+  int fd = fileno(fp);
+  std::string buffer(16 * 1024 * 1024, 'A');
+  ASSERT_NO_FATAL_FAILURE(Write(fd, buffer.data(), buffer.size()));
+
+  const auto bytes_read = pread(fd, buffer.data(), buffer.size(), 0);
+
+  ASSERT_EQ(bytes_read, buffer.size());
+
+  ASSERT_TRUE(std::all_of(buffer.begin(), buffer.end(), [](const auto& a) {
+    return a == 'A';
+  })) << buffer;
+}
+
+// Page size doesn't really matter. We can replace 4096 with any value.
+static constexpr size_t kBlockSize = 4096;
+constexpr std::array<unsigned char, kBlockSize> GetArbitraryPageData() {
+  std::array<unsigned char, kBlockSize> arr{};
+  int i = 0;
+  for (auto& a : arr) {
+    a = i++;
+  }
+  return arr;
+}
+
+void WriteTestData(int fd, const size_t offset, const size_t size) {
+  ASSERT_EQ(size % kBlockSize, 0);
+  static const auto data = GetArbitraryPageData();
+  size_t bytes_written = 0;
+  size_t cur_offset = offset;
+  while (bytes_written < size) {
+    const auto ret = pwrite(fd, data.data(), kBlockSize, cur_offset);
+    ASSERT_GT(ret, 0) << "Failed to pwrite " << strerror(errno);
+    bytes_written += ret;
+    cur_offset += ret;
+  }
+}
+
+TEST_F(IoUringTest, ExtentRead) {
+  const int fd = fileno(fp);
+  ASSERT_NO_FATAL_FAILURE(WriteTestData(fd, kBlockSize * 3, kBlockSize));
+  ASSERT_NO_FATAL_FAILURE(WriteTestData(fd, kBlockSize * 5, kBlockSize));
+  ASSERT_NO_FATAL_FAILURE(WriteTestData(fd, kBlockSize * 8, kBlockSize));
+  ASSERT_NO_FATAL_FAILURE(WriteTestData(fd, kBlockSize * 13, kBlockSize));
+  fsync(fd);
+
+  std::vector<unsigned char> data;
+  data.resize(kBlockSize * 4);
+
+  ASSERT_TRUE(
+      ring->PrepRead(fd, data.data(), kBlockSize, 3 * kBlockSize).IsOk());
+  ASSERT_TRUE(
+      ring->PrepRead(fd, data.data() + kBlockSize, kBlockSize, 5 * kBlockSize)
+          .IsOk());
+  ASSERT_TRUE(
+      ring->PrepRead(
+              fd, data.data() + kBlockSize * 2, kBlockSize, 8 * kBlockSize)
+          .IsOk());
+  ASSERT_TRUE(
+      ring->PrepRead(
+              fd, data.data() + kBlockSize * 3, kBlockSize, 13 * kBlockSize)
+          .IsOk());
+  ring->SubmitAndWait(4);
+  const auto cqes = ring->PopCQE(4);
+  if (cqes.IsErr()) {
+    FAIL() << cqes.GetError().ErrMsg();
+    return;
+  }
+  for (const auto& cqe : cqes.GetResult()) {
+    ASSERT_GT(cqe.res, 0);
+  }
+  for (size_t i = 0; i < data.size(); ++i) {
+    ASSERT_EQ(data[i], i % 256);
+  }
+}
\ No newline at end of file
diff --git a/liburing_cpp/tests/main.cpp b/liburing_cpp/tests/main.cpp
new file mode 100644
index 0000000..9874bf2
--- /dev/null
+++ b/liburing_cpp/tests/main.cpp
@@ -0,0 +1,22 @@
+//
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <gtest/gtest.h>
+
+int main(int argc, char *argv[]) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
\ No newline at end of file
diff --git a/liburing_cpp/xmake.lua b/liburing_cpp/xmake.lua
new file mode 100644
index 0000000..93e52ae
--- /dev/null
+++ b/liburing_cpp/xmake.lua
@@ -0,0 +1,19 @@
+add_requires("liburing", "gtest")
+
+target("liburing_cpp")
+  set_kind("static")
+  add_files("src/*.cpp")
+  add_packages("liburing")
+  set_languages("c++17")
+  add_includedirs("include", {public = true})
+  add_cxflags("-g")
+
+
+target("liburing_cpp_tests")
+  set_kind("binary")
+  add_files("tests/*.cpp")
+  set_languages("c++17")
+  add_deps("liburing_cpp")
+  add_packages("gtest", "liburing")
+  add_cxflags("-g")
+
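+-- Standalone build sketch (assumes xmake is installed):
+--   xmake build && xmake run liburing_cpp_tests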
diff --git a/main.cc b/main.cc
index a23a08b..103e1a1 100644
--- a/main.cc
+++ b/main.cc
@@ -21,25 +21,24 @@
 #include <base/at_exit.h>
 #include <base/command_line.h>
 #include <base/logging.h>
-#include <brillo/flag_helper.h>
+#include <gflags/gflags.h>
 
 #include "update_engine/common/daemon_base.h"
 #include "update_engine/common/logging.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/terminator.h"
-#include "update_engine/common/utils.h"
 
 using std::string;
+DEFINE_bool(logtofile, false, "Write logs to a file in log_dir.");
+DEFINE_bool(logtostderr,
+            false,
+            "Write logs to stderr instead of to a file in log_dir.");
+DEFINE_bool(foreground, false, "Don't daemon()ize; run in foreground.");
 
 int main(int argc, char** argv) {
-  DEFINE_bool(logtofile, false, "Write logs to a file in log_dir.");
-  DEFINE_bool(logtostderr,
-              false,
-              "Write logs to stderr instead of to a file in log_dir.");
-  DEFINE_bool(foreground, false, "Don't daemon()ize; run in foreground.");
-
   chromeos_update_engine::Terminator::Init();
-  brillo::FlagHelper::Init(argc, argv, "A/B Update Engine");
+  gflags::SetUsageMessage("A/B Update Engine");
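+  // The third argument (remove_flags=true) strips recognized flags from argv.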
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
 
   // We have two logging flags "--logtostderr" and "--logtofile"; and the logic
   // to choose the logging destination is:
diff --git a/metrics_utils.h b/metrics_utils.h
index 16e9eec..2f79140 100644
--- a/metrics_utils.h
+++ b/metrics_utils.h
@@ -17,7 +17,11 @@
 #ifndef UPDATE_ENGINE_METRICS_UTILS_H_
 #define UPDATE_ENGINE_METRICS_UTILS_H_
 
+#include <chrono>
 #include <string>
+#include <string_view>
+#include <type_traits>
+#include <utility>
 
 #include <base/time/time.h>
 
@@ -81,6 +85,69 @@
                                PrefsInterface* prefs,
                                ClockInterface* clock);
 
+template <typename T>
+class PersistedValue {
+ public:
+  PersistedValue(std::string_view key, PrefsInterface* prefs)
+      : key_(key), prefs_(prefs) {
+    val_ = metrics_utils::GetPersistedValue(key, prefs);
+  }
+  ~PersistedValue() { Flush(true); }
+  void Delete() {
+    val_ = {};
+    prefs_->Delete(key_);
+  }
+  T get() const { return val_; }
+  using clock = std::chrono::system_clock;
+  using time_point = clock::time_point;
+  // prefix increment
+  PersistedValue<T>& operator++() {
+    ++val_;
+    Flush();
+    return *this;
+  }
+  PersistedValue<T>& operator--() {
+    --val_;
+    Flush();
+    return *this;
+  }
+  PersistedValue<T>& operator+=(T&& t) {
+    val_ += std::forward<T>(t);
+    Flush();
+    return *this;
+  }
+  PersistedValue<T>& operator-=(T&& t) {
+    val_ -= std::forward<T>(t);
+    Flush();
+    return *this;
+  }
+  PersistedValue<T>& operator=(T&& t) {
+    val_ = std::forward<T>(t);
+    Flush();
+    return *this;
+  }
+  void Flush(bool force = false) {
+    auto now = clock::now();
+    if (now - last_save_ > metrics::kMetricFlushInterval || force) {
+      last_save_ = now;
+      // Check bool before the general integral case (bool is integral), and
+      // use `if constexpr` so only the matching branch must compile for T.
+      if constexpr (std::is_same_v<T, bool>) {
+        prefs_->SetBoolean(key_, val_);
+      } else if constexpr (std::is_integral_v<T>) {
+        prefs_->SetInt64(key_, val_);
+      } else {
+        auto value = std::to_string(val_);
+        prefs_->SetString(key_, value);
+      }
+    }
+  }
+
+ private:
+  const std::string_view key_;
+  PrefsInterface* prefs_;
+  T val_;
+  time_point last_save_{};
+};
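+
+// Usage sketch (|prefs| is a PrefsInterface*; the key name is hypothetical):
+//   PersistedValue<int64_t> attempt_count("attempt-count", prefs);
+//   ++attempt_count;  // persisted at most once per kMetricFlushInterval,
+//                     // and always flushed on destruction.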
+
 }  // namespace metrics_utils
 }  // namespace chromeos_update_engine
 
diff --git a/mock_libcurl_http_fetcher.h b/mock_libcurl_http_fetcher.h
index a14f953..054d878 100644
--- a/mock_libcurl_http_fetcher.h
+++ b/mock_libcurl_http_fetcher.h
@@ -25,9 +25,8 @@
 
 class MockLibcurlHttpFetcher : public LibcurlHttpFetcher {
  public:
-  MockLibcurlHttpFetcher(ProxyResolver* proxy_resolver,
-                         HardwareInterface* hardware)
-      : LibcurlHttpFetcher(proxy_resolver, hardware) {}
+  MockLibcurlHttpFetcher(HardwareInterface* hardware)
+      : LibcurlHttpFetcher(hardware) {}
 
   MOCK_METHOD0(GetHttpResponseCode, void());
 };
diff --git a/payload_consumer/cached_file_descriptor.cc b/payload_consumer/cached_file_descriptor.cc
index aa0dbcd..ea56b8b 100644
--- a/payload_consumer/cached_file_descriptor.cc
+++ b/payload_consumer/cached_file_descriptor.cc
@@ -96,4 +96,7 @@
   return true;
 }
 
+void UnownedCachedFileDescriptor::SetFD(FileDescriptor* fd) {
+  fd_ = fd;
+}
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/cached_file_descriptor.h b/payload_consumer/cached_file_descriptor.h
index 1193455..a428d80 100644
--- a/payload_consumer/cached_file_descriptor.h
+++ b/payload_consumer/cached_file_descriptor.h
@@ -31,7 +31,7 @@
 
 class CachedFileDescriptorBase : public FileDescriptor {
  public:
-  CachedFileDescriptorBase(size_t cache_size) : cache_(cache_size) {}
+  explicit CachedFileDescriptorBase(size_t cache_size) : cache_(cache_size) {}
   ~CachedFileDescriptorBase() override = default;
 
   bool Open(const char* path, int flags, mode_t mode) override {
@@ -85,6 +85,8 @@
  public:
   UnownedCachedFileDescriptor(FileDescriptor* fd, size_t cache_size)
       : CachedFileDescriptorBase(cache_size), fd_(fd) {}
+  // Used for EncodeFEC.
+  void SetFD(FileDescriptor* fd);
 
  protected:
   virtual FileDescriptor* GetFd() { return fd_; }
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index fc8858f..3b9f2b6 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -28,40 +28,36 @@
 #include <utility>
 #include <vector>
 
+#include <android-base/properties.h>
+#include <android-base/strings.h>
 #include <base/files/file_util.h>
 #include <base/format_macros.h>
 #include <base/metrics/histogram_macros.h>
 #include <base/strings/string_number_conversions.h>
+#include <base/strings/stringprintf.h>
 #include <base/time/time.h>
 #include <brillo/data_encoding.h>
 #include <bsdiff/bspatch.h>
 #include <google/protobuf/repeated_field.h>
 #include <puffin/puffpatch.h>
 
+#include "libsnapshot/cow_format.h"
 #include "update_engine/common/constants.h"
 #include "update_engine/common/download_action.h"
 #include "update_engine/common/error_code.h"
 #include "update_engine/common/error_code_utils.h"
 #include "update_engine/common/hardware_interface.h"
 #include "update_engine/common/prefs_interface.h"
-#include "update_engine/common/subprocess.h"
 #include "update_engine/common/terminator.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/bzip_extent_writer.h"
-#include "update_engine/payload_consumer/cached_file_descriptor.h"
-#include "update_engine/payload_consumer/certificate_parser_interface.h"
-#include "update_engine/payload_consumer/extent_reader.h"
-#include "update_engine/payload_consumer/extent_writer.h"
 #include "update_engine/payload_consumer/partition_update_generator_interface.h"
 #include "update_engine/payload_consumer/partition_writer.h"
+#include "update_engine/update_metadata.pb.h"
 #if USE_FEC
 #include "update_engine/payload_consumer/fec_file_descriptor.h"
 #endif  // USE_FEC
-#include "update_engine/payload_consumer/file_descriptor_utils.h"
-#include "update_engine/payload_consumer/mount_history.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
-#include "update_engine/payload_consumer/xz_extent_writer.h"
 
 using google::protobuf::RepeatedPtrField;
 using std::min;
@@ -206,6 +202,9 @@
 }
 
 int DeltaPerformer::Close() {
+  // Checkpoint update progress before canceling, so that subsequent attempts
+  // can resume from exactly where update_engine left last time.
+  CheckpointUpdateProgress(true);
   int err = -CloseCurrentPartition();
   LOG_IF(ERROR,
          !payload_hash_calculator_.Finalize() ||
@@ -398,6 +397,45 @@
       base::TimeDelta::FromMinutes(5),                                      \
       20);
 
+bool DeltaPerformer::CheckSPLDowngrade() {
+  if (!manifest_.has_security_patch_level()) {
+    return true;
+  }
+  if (manifest_.security_patch_level().empty()) {
+    return true;
+  }
+  const auto new_spl = manifest_.security_patch_level();
+  const auto current_spl =
+      android::base::GetProperty("ro.build.version.security_patch", "");
+  if (current_spl.empty()) {
+    LOG(WARNING) << "Failed to get ro.build.version.security_patch, unable to "
+                    "determine if this OTA is a SPL downgrade. Assuming this "
+                    "OTA is not SPL downgrade.";
+    return true;
+  }
+  if (new_spl < current_spl) {
+    const auto avb_state =
+        android::base::GetProperty("ro.boot.verifiedbootstate", "green");
+    if (android::base::EqualsIgnoreCase(avb_state, "green")) {
+      LOG(ERROR) << "Target build SPL " << new_spl
+                 << " is older than current build's SPL " << current_spl
+                 << ", this OTA is an SPL downgrade. Your device's "
+                    "ro.boot.verifiedbootstate="
+                 << avb_state
+                 << ", it probably has a locked bootlaoder. Since a locked "
+                    "bootloader will reject SPL downgrade no matter what, we "
+                    "will reject this OTA.";
+      return false;
+    }
+    install_plan_->powerwash_required = true;
+    LOG(WARNING)
+        << "Target build SPL " << new_spl
+        << " is older than current build's SPL " << current_spl
+        << ", this OTA is an SPL downgrade. Data wipe will be required";
+  }
+  return true;
+}
+
 // Wrapper around write. Returns true if all requested bytes
 // were written, or false on any error, regardless of progress
 // and stores an action exit code in |error|.
@@ -444,6 +482,61 @@
 
     block_size_ = manifest_.block_size();
 
+    if (!CheckSPLDowngrade()) {
+      *error = ErrorCode::kPayloadTimestampError;
+      return false;
+    }
+
+    // Update estimate_cow_size if VABC is disabled:
+    // new_cow_size per partition = partition_size - (# of blocks covered by
+    // the partition's COPY operations) * block_size
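+    // E.g. a 100 MiB partition (25600 4KiB blocks) whose COPY operations
+    // cover 10240 blocks yields new_cow_size = 15360 * 4096 bytes = 60 MiB,
+    // before the metadata, label-op, and 2MB headroom terms below are added.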
+    if (install_plan_->vabc_none) {
+      LOG(INFO) << "Setting Virtual AB Compression algorithm to none";
+      manifest_.mutable_dynamic_partition_metadata()
+          ->set_vabc_compression_param("none");
+      for (auto& partition : *manifest_.mutable_partitions()) {
+        auto new_cow_size = partition.new_partition_info().size();
+        for (const auto& operation : partition.merge_operations()) {
+          if (operation.type() == CowMergeOperation::COW_COPY) {
+            new_cow_size -=
+                operation.dst_extent().num_blocks() * manifest_.block_size();
+          }
+        }
+        // Every block written to COW device will come with a header which
+        // stores src/dst block info along with other data.
+        const auto cow_metadata_size = partition.new_partition_info().size() /
+                                       manifest_.block_size() *
+                                       sizeof(android::snapshot::CowOperation);
+        // update_engine will emit a label op every op or every two seconds,
+        // whichever one is longer. In the worst case, we add 1 label per
+        // InstallOp. So take size of label ops into account.
+        const auto label_ops_size = partition.operations_size() *
+                                    sizeof(android::snapshot::CowOperation);
+        // Adding extra 2MB headroom just for any unexpected space usage.
+        // If we overrun the reserved COW size, the entire OTA will fail,
+        // with no way for the user to retry it.
+        partition.set_estimate_cow_size(new_cow_size + (1024 * 1024 * 2) +
+                                        cow_metadata_size + label_ops_size);
+        LOG(INFO) << "New COW size for partition " << partition.partition_name()
+                  << " is " << partition.estimate_cow_size();
+      }
+    }
+    if (install_plan_->disable_vabc) {
+      manifest_.mutable_dynamic_partition_metadata()->set_vabc_enabled(false);
+    }
+    if (install_plan_->enable_threading) {
+      manifest_.mutable_dynamic_partition_metadata()
+          ->mutable_vabc_feature_set()
+          ->set_threaded(true);
+      LOG(INFO) << "Attempting to enable multi-threaded compression for VABC";
+    }
+    if (install_plan_->batched_writes) {
+      manifest_.mutable_dynamic_partition_metadata()
+          ->mutable_vabc_feature_set()
+          ->set_batch_writes(true);
+      LOG(INFO) << "Attempting to enable batched writes for VABC";
+    }
+
     // This populates |partitions_| and the |install_plan.partitions| with the
     // list of partitions from the manifest.
     if (!ParseManifestPartitions(error))
@@ -549,7 +642,7 @@
 
     base::TimeTicks op_start_time = base::TimeTicks::Now();
 
-    bool op_result;
+    bool op_result{};
     const string op_name = InstallOperationTypeName(op.type());
     switch (op.type()) {
       case InstallOperation::REPLACE:
@@ -692,6 +785,13 @@
           partitions_, boot_control_, block_size_, error)) {
     return false;
   }
+  auto&& has_verity = [](const auto& part) {
+    return part.fec_extent().num_blocks() > 0 ||
+           part.hash_tree_extent().num_blocks() > 0;
+  };
+  if (!std::any_of(partitions_.begin(), partitions_.end(), has_verity)) {
+    install_plan_->write_verity = false;
+  }
 
   LogPartitionInfo(partitions_);
   return true;
@@ -1205,51 +1305,71 @@
                                      const string& update_check_response_hash) {
   int64_t next_operation = kUpdateStateOperationInvalid;
   if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
-        next_operation != kUpdateStateOperationInvalid && next_operation > 0))
+        next_operation != kUpdateStateOperationInvalid && next_operation > 0)) {
+    LOG(WARNING) << "Failed to resume update " << kPrefsUpdateStateNextOperation
+                 << " invalid: " << next_operation;
     return false;
+  }
 
   string interrupted_hash;
   if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
         !interrupted_hash.empty() &&
-        interrupted_hash == update_check_response_hash))
+        interrupted_hash == update_check_response_hash)) {
+    LOG(WARNING) << "Failed to resume update " << kPrefsUpdateCheckResponseHash
+                 << " mismatch, last hash: " << interrupted_hash
+                 << ", current hash: " << update_check_response_hash << "";
     return false;
+  }
 
-  int64_t resumed_update_failures;
+  int64_t resumed_update_failures{};
   // Note that storing this value is optional, but if it is there it should
   // not be more than the limit.
   if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
-      resumed_update_failures > kMaxResumedUpdateFailures)
+      resumed_update_failures > kMaxResumedUpdateFailures) {
+    LOG(WARNING) << "Failed to resume update " << kPrefsResumedUpdateFailures
+                 << " invalid: " << resumed_update_failures;
     return false;
+  }
 
   // Validation check the rest.
   int64_t next_data_offset = -1;
   if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
-        next_data_offset >= 0))
+        next_data_offset >= 0)) {
+    LOG(WARNING) << "Failed to resume update "
+                 << kPrefsUpdateStateNextDataOffset
+                 << " invalid: " << next_data_offset;
     return false;
+  }
 
   string sha256_context;
   if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
-        !sha256_context.empty()))
+        !sha256_context.empty())) {
+    LOG(WARNING) << "Failed to resume update " << kPrefsUpdateStateSHA256Context
+                 << " is empty.";
     return false;
+  }
 
   int64_t manifest_metadata_size = 0;
   if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
-        manifest_metadata_size > 0))
+        manifest_metadata_size > 0)) {
+    LOG(WARNING) << "Failed to resume update " << kPrefsManifestMetadataSize
+                 << " invalid: " << manifest_metadata_size;
     return false;
+  }
 
   int64_t manifest_signature_size = 0;
   if (!(prefs->GetInt64(kPrefsManifestSignatureSize,
                         &manifest_signature_size) &&
-        manifest_signature_size >= 0))
+        manifest_signature_size >= 0)) {
+    LOG(WARNING) << "Failed to resume update " << kPrefsManifestSignatureSize
+                 << " invalid: " << manifest_signature_size;
     return false;
+  }
 
   return true;
 }
 
-bool DeltaPerformer::ResetUpdateProgress(
-    PrefsInterface* prefs,
-    bool quick,
-    bool skip_dynamic_partititon_metadata_updated) {
+bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) {
   TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
                                         kUpdateStateOperationInvalid));
   if (!quick) {
@@ -1264,10 +1384,8 @@
     prefs->Delete(kPrefsPostInstallSucceeded);
     prefs->Delete(kPrefsVerityWritten);
 
-    if (!skip_dynamic_partititon_metadata_updated) {
-      LOG(INFO) << "Resetting recorded hash for prepared partitions.";
-      prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
-    }
+    LOG(INFO) << "Resetting recorded hash for prepared partitions.";
+    prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
   }
   return true;
 }
@@ -1394,7 +1512,7 @@
   total_bytes_received_ += buffer_offset_;
 
   // Speculatively count the resume as a failure.
-  int64_t resumed_update_failures;
+  int64_t resumed_update_failures{};
   if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
     resumed_update_failures++;
   } else {
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index dd71467..12238a7 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -32,10 +32,9 @@
 
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/platform_constants.h"
-#include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/file_writer.h"
 #include "update_engine/payload_consumer/install_plan.h"
-#include "update_engine/payload_consumer/partition_writer.h"
+#include "update_engine/payload_consumer/partition_writer_interface.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/update_metadata.pb.h"
@@ -88,7 +87,7 @@
   // FileWriter's Write implementation where caller doesn't care about
   // error codes.
   bool Write(const void* bytes, size_t count) override {
-    ErrorCode error;
+    ErrorCode error{};
     return Write(bytes, count, &error);
   }
 
@@ -148,10 +147,7 @@
-  // If |skip_dynamic_partititon_metadata_updated| is true, do not reset
-  // dynamic-partition-metadata-updated.
   // Returns true on success, false otherwise.
-  static bool ResetUpdateProgress(
-      PrefsInterface* prefs,
-      bool quick,
-      bool skip_dynamic_partititon_metadata_updated = false);
+  static bool ResetUpdateProgress(PrefsInterface* prefs, bool quick);
 
   // Attempts to parse the update metadata starting from the beginning of
   // |payload|. On success, returns kMetadataParseSuccess. Returns
@@ -315,6 +311,8 @@
   // Check if partition `part_name` is a dynamic partition.
   bool IsDynamicPartition(const std::string& part_name, uint32_t slot);
 
+  bool CheckSPLDowngrade();
+
   // Update Engine preference store.
   PrefsInterface* prefs_;
 
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index de948fb..bffee8d 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -46,7 +46,6 @@
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
-#include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/payload_signer.h"
 #include "update_engine/update_metadata.pb.h"
@@ -221,7 +220,7 @@
 static void SignGeneratedPayload(const string& payload_path,
                                  uint64_t* out_metadata_size) {
   string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
-  size_t signature_size;
+  size_t signature_size{};
   ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(private_key_path,
                                                      &signature_size));
   brillo::Blob metadata_hash, payload_hash;
@@ -249,7 +248,7 @@
     bool verification_success) {
   vector<string> signature_size_strings;
   for (const auto& key_path : private_key_paths) {
-    size_t signature_size;
+    size_t signature_size{};
     ASSERT_TRUE(
         PayloadSigner::GetMaximumSignatureSize(key_path, &signature_size));
     signature_size_strings.push_back(base::StringPrintf("%zu", signature_size));
@@ -593,7 +592,7 @@
 
   if (signature_test == kSignatureGeneratedPlaceholder ||
       signature_test == kSignatureGeneratedPlaceholderMismatch) {
-    size_t signature_size;
+    size_t signature_size{};
     ASSERT_TRUE(PayloadSigner::GetMaximumSignatureSize(
         GetBuildArtifactsPath(kUnittestPrivateKeyPath), &signature_size));
     LOG(INFO) << "Inserting placeholder signature.";
@@ -603,7 +602,7 @@
     if (signature_test == kSignatureGeneratedPlaceholderMismatch) {
       signature_size -= 1;
       LOG(INFO) << "Inserting mismatched placeholder signature.";
-      ASSERT_FALSE(InsertSignaturePlaceholder(
+      ASSERT_TRUE(InsertSignaturePlaceholder(
           signature_size, state->delta_file->path(), &state->metadata_size));
       return;
     }
@@ -641,6 +640,7 @@
     ASSERT_TRUE(payload_metadata.ParsePayloadHeader(state->delta));
     state->metadata_size = payload_metadata.GetMetadataSize();
     LOG(INFO) << "Metadata size: " << state->metadata_size;
+    LOG(INFO) << "Payload size: " << state->delta.size();
     state->metadata_signature_size =
         payload_metadata.GetMetadataSignatureSize();
     LOG(INFO) << "Metadata signature size: " << state->metadata_signature_size;
@@ -828,8 +828,8 @@
                                                install_plan->target_slot,
                                                state->result_kernel->path());
 
-  ErrorCode expected_error, actual_error;
-  bool continue_writing;
+  ErrorCode expected_error{}, actual_error{};
+  bool continue_writing{};
   switch (op_hash_test) {
     case kInvalidOperationData: {
       // Muck with some random offset post the metadata size so that
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index effc8f3..0f48da1 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -49,7 +49,6 @@
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/testing_constants.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/fake_file_descriptor.h"
 #include "update_engine/payload_consumer/mock_partition_writer.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
@@ -339,7 +338,7 @@
 
     payload_.metadata_size = expected_metadata_size;
     payload_.size = actual_metadata_size + 1;
-    ErrorCode error_code;
+    ErrorCode error_code{};
     // When filling in size in manifest, exclude the size of the 24-byte header.
     uint64_t size_in_manifest = htobe64(actual_metadata_size - 24);
     performer_.Write(&size_in_manifest, 8, &error_code);
@@ -374,8 +373,8 @@
 
     install_plan_.hash_checks_mandatory = hash_checks_mandatory;
 
-    MetadataParseResult expected_result, actual_result;
-    ErrorCode expected_error, actual_error;
+    MetadataParseResult expected_result{}, actual_result{};
+    ErrorCode expected_error{}, actual_error{};
 
     // Fill up the metadata signature in install plan according to the test.
     switch (metadata_signature_test) {
@@ -886,7 +885,7 @@
                                PayloadMetadata::kDeltaManifestSizeSize));
   uint32_t metadata_signature_size_be = htobe32(metadata_signature_size);
 
-  ErrorCode error;
+  ErrorCode error{};
   EXPECT_FALSE(
       performer_.Write(&metadata_signature_size_be,
                        PayloadMetadata::kDeltaMetadataSignatureSizeSize + 1,
@@ -916,7 +915,7 @@
                                PayloadMetadata::kDeltaManifestSizeSize));
 
   uint32_t metadata_signature_size_be = htobe32(metadata_signature_size);
-  ErrorCode error;
+  ErrorCode error{};
   EXPECT_FALSE(
       performer_.Write(&metadata_signature_size_be,
                        PayloadMetadata::kDeltaMetadataSignatureSizeSize + 1,
@@ -930,7 +929,7 @@
       {}, {}, true, kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion);
   install_plan_.hash_checks_mandatory = true;
   payload_.size = payload_data.size();
-  ErrorCode error;
+  ErrorCode error{};
   EXPECT_EQ(MetadataParseResult::kSuccess,
             performer_.ParsePayloadMetadata(payload_data, &error));
   EXPECT_EQ(ErrorCode::kSuccess, error);
@@ -1072,14 +1071,14 @@
 TEST(DISABLED_ConfVersionTest, ConfVersionsMatch) {
   // Test that the versions in update_engine.conf that is installed to the
   // image match the maximum supported delta versions in the update engine.
-  uint32_t minor_version;
+  uint32_t minor_version{};
   brillo::KeyValueStore store;
   EXPECT_TRUE(store.Load(GetBuildArtifactsPath().Append("update_engine.conf")));
   EXPECT_TRUE(utils::GetMinorVersion(store, &minor_version));
   EXPECT_EQ(kMaxSupportedMinorPayloadVersion, minor_version);
 
   string major_version_str;
-  uint64_t major_version;
+  uint64_t major_version{};
   EXPECT_TRUE(store.GetString("PAYLOAD_MAJOR_VERSION", &major_version_str));
   EXPECT_TRUE(base::StringToUint64(major_version_str, &major_version));
   EXPECT_EQ(kMaxSupportedMajorPayloadVersion, major_version);
diff --git a/payload_consumer/extent_map.h b/payload_consumer/extent_map.h
index a985cb5..a83bf0f 100644
--- a/payload_consumer/extent_map.h
+++ b/payload_consumer/extent_map.h
@@ -56,6 +56,15 @@
     const auto it = map_.find(extent);
     if (it == map_.end()) {
       for (const auto& ext : set_.GetCandidateRange(extent)) {
+        // Sometimes there are operations like
+        // map.AddExtent({0, 5}, 42);
+        // map.Get({2, 1})
+        // If the queried extent is completely contained within a key, we
+        // still consider this a valid query.
+
+        if (ExtentContains(ext, extent)) {
+          return map_.at(ext);
+        }
         if (ExtentRanges::ExtentsOverlap(ext, extent)) {
           LOG(WARNING) << "Looking up a partially intersecting extent isn't "
                           "supported by "
diff --git a/payload_consumer/extent_map_unittest.cc b/payload_consumer/extent_map_unittest.cc
index d8137a0..8e79b33 100644
--- a/payload_consumer/extent_map_unittest.cc
+++ b/payload_consumer/extent_map_unittest.cc
@@ -20,7 +20,6 @@
 
 #include "update_engine/payload_consumer/extent_map.h"
 #include "update_engine/payload_generator/extent_ranges.h"
-#include "update_engine/payload_generator/extent_utils.h"
 
 namespace chromeos_update_engine {
 
@@ -41,6 +40,17 @@
   ASSERT_TRUE(map_.AddExtent(ExtentForRange(0, 5), 7));
   ASSERT_TRUE(map_.AddExtent(ExtentForRange(10, 5), 1));
   auto ret = map_.Get(ExtentForRange(1, 2));
+  ASSERT_EQ(ret, 7);
+}
+
+TEST_F(ExtentMapTest, QueryNoMerge) {
+  ASSERT_TRUE(map_.AddExtent(ExtentForRange(0, 5), 7));
+  ASSERT_TRUE(map_.AddExtent(ExtentForRange(5, 5), 1));
+  auto ret = map_.Get(ExtentForRange(1, 2));
+  ASSERT_EQ(ret, 7);
+  ret = map_.Get(ExtentForRange(0, 10));
+  ASSERT_EQ(ret, std::nullopt);
+  ret = map_.Get(ExtentForRange(4, 3));
   ASSERT_EQ(ret, std::nullopt);
 }
 
@@ -48,9 +58,9 @@
   ASSERT_TRUE(map_.AddExtent(ExtentForRange(0, 5), 7));
   ASSERT_TRUE(map_.AddExtent(ExtentForRange(10, 5), 1));
   auto ret = map_.Get(ExtentForRange(3, 2));
-  ASSERT_EQ(ret, std::nullopt);
+  ASSERT_EQ(ret, 7);
   ret = map_.Get(ExtentForRange(4, 1));
-  ASSERT_EQ(ret, std::nullopt);
+  ASSERT_EQ(ret, 7);
   ret = map_.Get(ExtentForRange(5, 5));
   ASSERT_EQ(ret, std::nullopt);
   ret = map_.Get(ExtentForRange(5, 6));
diff --git a/payload_consumer/file_descriptor.h b/payload_consumer/file_descriptor.h
index faebcc1..f672871 100644
--- a/payload_consumer/file_descriptor.h
+++ b/payload_consumer/file_descriptor.h
@@ -103,12 +103,19 @@
   // Indicates whether the descriptor is currently open.
   virtual bool IsOpen() = 0;
 
+  // Return the wrapped underlying file descriptor. Some classes might not
+  // support this.
+  // Using the read/write syscalls on the returned file descriptor should
+  // have the same effect as calling the Read()/Write() methods of this
+  // FileDescriptor instance.
+  virtual int Fd() { return -1; }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(FileDescriptor);
 };
 
 // A simple EINTR-immune wrapper implementation around standard system calls.
-class EintrSafeFileDescriptor : public FileDescriptor {
+class EintrSafeFileDescriptor final : public FileDescriptor {
  public:
   EintrSafeFileDescriptor() : fd_(-1) {}
   ~EintrSafeFileDescriptor();
@@ -128,6 +135,7 @@
   bool Close() override;
   bool IsSettingErrno() override { return true; }
   bool IsOpen() override { return (fd_ >= 0); }
+  int Fd() override { return fd_; }
 
  protected:
   int fd_;
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index 2770aff..dfbe513 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -24,7 +24,9 @@
 
 #include <algorithm>
 #include <cstdlib>
+#include <functional>
 #include <memory>
+#include <numeric>
 #include <string>
 #include <utility>
 
@@ -35,9 +37,10 @@
 #include <brillo/secure_blob.h>
 #include <brillo/streams/file_stream.h>
 
-#include "common/error_code.h"
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/install_plan.h"
 
 using brillo::data_encoding::Base64Encode;
 using std::string;
@@ -77,7 +80,9 @@
 
 namespace {
 const off_t kReadFileBufferSize = 128 * 1024;
-constexpr float kVerityProgressPercent = 0.6;
+constexpr float kVerityProgressPercent = 0.3;
+constexpr float kEncodeFECPercent = 0.3;
+
 }  // namespace
 
 void FilesystemVerifierAction::PerformAction() {
@@ -97,7 +102,27 @@
     abort_action_completer.set_code(ErrorCode::kSuccess);
     return;
   }
+  // partition_weight_[i] = total size of partitions before index i.
+  partition_weight_.clear();
+  partition_weight_.reserve(install_plan_.partitions.size() + 1);
+  partition_weight_.push_back(0);
+  for (const auto& part : install_plan_.partitions) {
+    partition_weight_.push_back(part.target_size);
+  }
+  std::partial_sum(partition_weight_.begin(),
+                   partition_weight_.end(),
+                   partition_weight_.begin(),
+                   std::plus<size_t>());
+
   install_plan_.Dump();
+  // If we are not writing verity, just map all partitions once at the
+  // beginning.
+  // No need to re-map for each partition, because we are not writing any new
+  // COW data.
+  if (dynamic_control_->UpdateUsesSnapshotCompression() &&
+      !install_plan_.write_verity) {
+    dynamic_control_->MapAllPartitions();
+  }
   StartPartitionHashing();
   abort_action_completer.set_should_complete(false);
 }
@@ -135,11 +160,9 @@
 }
 
 void FilesystemVerifierAction::UpdatePartitionProgress(double progress) {
-  // We don't consider sizes of each partition. Every partition
-  // has the same length on progress bar.
-  // TODO(b/186087589): Take sizes of each partition into account.
-  UpdateProgress((progress + partition_index_) /
-                 install_plan_.partitions.size());
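+  // Linearly interpolate between the cumulative weight at the start and the
+  // end of the current partition. E.g. with partition sizes {100, 300},
+  // partition_weight_ = {0, 100, 400}; partition_index_ == 1 at progress 0.5
+  // reports (100 * 0.5 + 400 * 0.5) / 400 = 0.625 overall.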
+  UpdateProgress((partition_weight_[partition_index_] * (1 - progress) +
+                  partition_weight_[partition_index_ + 1] * progress) /
+                 partition_weight_.back());
 }
 
 bool FilesystemVerifierAction::InitializeFdVABC(bool should_write_verity) {
@@ -166,8 +189,10 @@
     // writes won't be visible to previously opened snapuserd daemon. To ensure
     // that we will see the most up to date data from partitions, call Unmap()
     // then Map() to re-spin daemon.
-    dynamic_control_->UnmapAllPartitions();
-    dynamic_control_->MapAllPartitions();
+    if (install_plan_.write_verity) {
+      dynamic_control_->UnmapAllPartitions();
+      dynamic_control_->MapAllPartitions();
+    }
     return InitializeFd(partition.readonly_target_path);
   }
   partition_fd_ =
@@ -196,6 +221,37 @@
   return true;
 }
 
+void FilesystemVerifierAction::WriteVerityData(FileDescriptor* fd,
+                                               void* buffer,
+                                               const size_t buffer_size) {
+  if (verity_writer_->FECFinished()) {
+    LOG(INFO) << "EncodeFEC is completed. Resuming other tasks";
+    if (dynamic_control_->UpdateUsesSnapshotCompression()) {
+      // Spin up snapuserd to read fs.
+      if (!InitializeFdVABC(false)) {
+        LOG(ERROR) << "Failed to map all partitions";
+        Cleanup(ErrorCode::kFilesystemVerifierError);
+        return;
+      }
+    }
+    HashPartition(0, partition_size_, buffer, buffer_size);
+    return;
+  }
+  if (!verity_writer_->IncrementalFinalize(fd, fd)) {
+    LOG(ERROR) << "Failed to write verity data";
+    Cleanup(ErrorCode::kVerityCalculationError);
+    return;
+  }
+  UpdatePartitionProgress(kVerityProgressPercent +
+                          verity_writer_->GetProgress() * kEncodeFECPercent);
+  CHECK(pending_task_id_.PostTask(
+      FROM_HERE,
+      base::BindOnce(&FilesystemVerifierAction::WriteVerityData,
+                     base::Unretained(this),
+                     fd,
+                     buffer,
+                     buffer_size)));
+}
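
WriteVerityData() keeps the main loop responsive by performing one bounded unit of FEC work per call and re-posting itself until FECFinished() reports completion, at which point partition hashing resumes. A generic sketch of this cooperative re-posting pattern, with a plain task queue standing in for the real message loop and base::BindOnce:

#include <cstdio>
#include <functional>
#include <queue>

// Hypothetical stand-in for the message loop used by update_engine.
std::queue<std::function<void()>> g_tasks;

void DoChunkedWork(int rounds_left) {
  if (rounds_left == 0) {
    std::printf("all rounds done, resuming other tasks\n");
    return;
  }
  std::printf("one bounded round of work, %d left\n", rounds_left);
  // Re-post ourselves instead of looping, so other queued tasks can run.
  g_tasks.push([rounds_left] { DoChunkedWork(rounds_left - 1); });
}

int main() {
  g_tasks.push([] { DoChunkedWork(3); });
  while (!g_tasks.empty()) {
    auto task = std::move(g_tasks.front());
    g_tasks.pop();
    task();  // each task may enqueue a follow-up
  }
  return 0;
}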
+
 void FilesystemVerifierAction::WriteVerityAndHashPartition(
     const off64_t start_offset,
     const off64_t end_offset,
@@ -207,20 +263,7 @@
     LOG_IF(WARNING, start_offset > end_offset)
         << "start_offset is greater than end_offset : " << start_offset << " > "
         << end_offset;
-    if (!verity_writer_->Finalize(fd, fd)) {
-      LOG(ERROR) << "Failed to write verity data";
-      Cleanup(ErrorCode::kVerityCalculationError);
-      return;
-    }
-    if (dynamic_control_->UpdateUsesSnapshotCompression()) {
-      // Spin up snapuserd to read fs.
-      if (!InitializeFdVABC(false)) {
-        LOG(ERROR) << "Failed to map all partitions";
-        Cleanup(ErrorCode::kFilesystemVerifierError);
-        return;
-      }
-    }
-    HashPartition(0, partition_size_, buffer, buffer_size);
+    WriteVerityData(fd, buffer, buffer_size);
     return;
   }
   const auto cur_offset = fd->Seek(start_offset, SEEK_SET);
@@ -290,8 +333,16 @@
     return;
   }
   const auto progress = (start_offset + bytes_read) * 1.0f / partition_size_;
-  UpdatePartitionProgress(progress * (1 - kVerityProgressPercent) +
-                          kVerityProgressPercent);
+  // If we are writing verity, then the progress bar will be split between
+  // verity writes and partition hashing. Otherwise, the entire progress bar is
+  // dedicated to partition hashing for smooth progress.
+  if (ShouldWriteVerity()) {
+    UpdatePartitionProgress(
+        progress * (1 - (kVerityProgressPercent + kEncodeFECPercent)) +
+        kVerityProgressPercent + kEncodeFECPercent);
+  } else {
+    UpdatePartitionProgress(progress);
+  }
   CHECK(pending_task_id_.PostTask(
       FROM_HERE,
       base::BindOnce(&FilesystemVerifierAction::HashPartition,
diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h
index edc8e53..5bc44b1 100644
--- a/payload_consumer/filesystem_verifier_action.h
+++ b/payload_consumer/filesystem_verifier_action.h
@@ -86,6 +86,10 @@
 
  private:
   friend class FilesystemVerifierActionTestDelegate;
+  // Schedules incremental EncodeFEC work, re-posting itself until the FEC
+  // computation completes, then resumes partition hashing.
+  void WriteVerityData(FileDescriptor* fd,
+                       void* buffer,
+                       const size_t buffer_size);
   void WriteVerityAndHashPartition(const off64_t start_offset,
                                    const off64_t end_offset,
                                    void* buffer,
@@ -172,6 +176,11 @@
   // points to pending read callbacks from async stream.
   ScopedTaskId pending_task_id_;
 
+  // Cumulative sum of partition sizes. Used for progress report.
+  // This vector will always start with 0, and end with total size of all
+  // partitions.
+  std::vector<size_t> partition_weight_;
+
   DISALLOW_COPY_AND_ASSIGN(FilesystemVerifierAction);
 };
 
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index 533292a..b2ed158 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -37,7 +37,6 @@
 #include "update_engine/common/mock_dynamic_partition_control.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/fake_file_descriptor.h"
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/verity_writer_android.h"
 
diff --git a/payload_consumer/install_plan.cc b/payload_consumer/install_plan.cc
index 91eb53b..cea8e5a 100644
--- a/payload_consumer/install_plan.cc
+++ b/payload_consumer/install_plan.cc
@@ -26,7 +26,6 @@
 #include <base/strings/stringprintf.h>
 
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/update_metadata.pb.h"
 
 using std::string;
diff --git a/payload_consumer/install_plan.h b/payload_consumer/install_plan.h
index 883aa60..93aebce 100644
--- a/payload_consumer/install_plan.h
+++ b/payload_consumer/install_plan.h
@@ -73,6 +73,8 @@
       ErrorCode* error);
 
   bool is_resume{false};
+  bool vabc_none{false};
+  bool disable_vabc{false};
   std::string download_url;  // url to download from
   std::string version;       // version we are installing.
 
@@ -194,6 +196,12 @@
   // The name of dynamic partitions not included in the payload. Only used
   // for partial updates.
   std::vector<std::string> untouched_dynamic_partitions;
+
+  // Whether to batch write operations for COW
+  bool batched_writes = false;
+
+  // Whether to enable multi-threaded compression on COW writes
+  bool enable_threading = false;
 };
 
 class InstallPlanAction;
diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc
index 1fb929e..d7d8bea 100644
--- a/payload_consumer/partition_writer.cc
+++ b/payload_consumer/partition_writer.cc
@@ -160,7 +160,7 @@
   // partial updates. Use the source size as the indicator.
 
   target_path_ = install_part_.target_path;
-  int err;
+  int err{};
 
   int flags = O_RDWR;
   if (!interactive_)
@@ -288,7 +288,9 @@
 }
 
 void PartitionWriter::CheckpointUpdateProgress(size_t next_op_index) {
-  target_fd_->Flush();
+  if (target_fd_) {
+    target_fd_->Flush();
+  }
 }
 
 std::unique_ptr<ExtentWriter> PartitionWriter::CreateBaseExtentWriter() {
diff --git a/payload_consumer/partition_writer_factory_android.cc b/payload_consumer/partition_writer_factory_android.cc
index 6736620..a04d726 100644
--- a/payload_consumer/partition_writer_factory_android.cc
+++ b/payload_consumer/partition_writer_factory_android.cc
@@ -19,6 +19,7 @@
 
 #include <base/logging.h>
 
+#include "update_engine/payload_consumer/partition_writer.h"
 #include "update_engine/payload_consumer/vabc_partition_writer.h"
 
 namespace chromeos_update_engine::partition_writer {
diff --git a/payload_consumer/partition_writer_interface.h b/payload_consumer/partition_writer_interface.h
index e346292..8080795 100644
--- a/payload_consumer/partition_writer_interface.h
+++ b/payload_consumer/partition_writer_interface.h
@@ -23,9 +23,6 @@
 #include <brillo/secure_blob.h>
 #include <gtest/gtest_prod.h>
 
-#include "update_engine/common/dynamic_partition_control_interface.h"
-#include "update_engine/payload_consumer/extent_writer.h"
-#include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/update_metadata.pb.h"
 
diff --git a/payload_consumer/partition_writer_unittest.cc b/payload_consumer/partition_writer_unittest.cc
index 331a061..4910594 100644
--- a/payload_consumer/partition_writer_unittest.cc
+++ b/payload_consumer/partition_writer_unittest.cc
@@ -26,16 +26,15 @@
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/delta_performer.h"
-#include "update_engine/payload_consumer/extent_reader.h"
 #include "update_engine/payload_consumer/extent_writer.h"
 #include "update_engine/payload_consumer/fake_file_descriptor.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/install_plan.h"
+#include "update_engine/payload_consumer/partition_writer.h"
+#include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/annotated_operation.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/extent_ranges.h"
-#include "update_engine/payload_generator/payload_file.h"
 #include "update_engine/payload_generator/payload_generation_config.h"
 #include "update_engine/update_metadata.pb.h"
 
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index f23b668..a38405d 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -26,7 +26,6 @@
 #include <brillo/secure_blob.h>
 
 #include "update_engine/common/error_code.h"
-#include "update_engine/common/platform_constants.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
 #include "update_engine/update_metadata.pb.h"
 
diff --git a/payload_consumer/payload_verifier.cc b/payload_consumer/payload_verifier.cc
index 8a3ea65..83ef8c9 100644
--- a/payload_consumer/payload_verifier.cc
+++ b/payload_consumer/payload_verifier.cc
@@ -23,7 +23,6 @@
 #include <openssl/pem.h>
 
 #include "update_engine/common/constants.h"
-#include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/certificate_parser_interface.h"
 #include "update_engine/update_metadata.pb.h"
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index a72462a..a6fa4b3 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -36,7 +36,6 @@
 
 #include "update_engine/common/action_processor.h"
 #include "update_engine/common/boot_control_interface.h"
-#include "update_engine/common/platform_constants.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
 
@@ -114,9 +113,11 @@
   if (dynamic_control->UpdateUsesSnapshotCompression()) {
     // Before calling MapAllPartitions to map snapshot devices, all CowWriters
     // must be closed, and MapAllPartitions() should be called.
-    dynamic_control->UnmapAllPartitions();
-    if (!dynamic_control->MapAllPartitions()) {
-      return CompletePostinstall(ErrorCode::kPostInstallMountError);
+    if (!install_plan_.partitions.empty()) {
+      dynamic_control->UnmapAllPartitions();
+      if (!dynamic_control->MapAllPartitions()) {
+        return CompletePostinstall(ErrorCode::kPostInstallMountError);
+      }
     }
   }
 
@@ -201,7 +202,7 @@
 
 void PostinstallRunnerAction::PerformPartitionPostinstall() {
   if (install_plan_.download_url.empty()) {
-    LOG(INFO) << "Skipping post-install during rollback";
+    LOG(INFO) << "Skipping post-install";
     return CompletePostinstall(ErrorCode::kSuccess);
   }
 
@@ -447,11 +448,12 @@
       error_code = ErrorCode::kUpdatedButNotActive;
     }
   }
-
-  auto dynamic_control = boot_control_->GetDynamicPartitionControl();
-  CHECK(dynamic_control);
-  dynamic_control->UnmapAllPartitions();
-  LOG(INFO) << "Unmapped all partitions.";
+  if (!install_plan_.partitions.empty()) {
+    auto dynamic_control = boot_control_->GetDynamicPartitionControl();
+    CHECK(dynamic_control);
+    dynamic_control->UnmapAllPartitions();
+    LOG(INFO) << "Unmapped all partitions.";
+  }
 
   ScopedActionCompleter completer(processor_, this);
   completer.set_code(error_code);
diff --git a/payload_consumer/snapshot_extent_writer_unittest.cc b/payload_consumer/snapshot_extent_writer_unittest.cc
index 0c96c3e..d43d3a1 100644
--- a/payload_consumer/snapshot_extent_writer_unittest.cc
+++ b/payload_consumer/snapshot_extent_writer_unittest.cc
@@ -43,9 +43,14 @@
   using ICowWriter::ICowWriter;
   ~FakeCowWriter() = default;
 
-  bool EmitCopy(uint64_t new_block, uint64_t old_block) override {
-    operations_[new_block] = {.type = CowOp::COW_COPY,
-                              .source_block = static_cast<size_t>(old_block)};
+  bool EmitCopy(uint64_t new_block,
+                uint64_t old_block,
+                uint64_t num_blocks) override {
+    for (size_t i = 0; i < num_blocks; i++) {
+      operations_[new_block + i] = {
+          .type = CowOp::COW_COPY,
+          .source_block = static_cast<size_t>(old_block + i)};
+    }
     return true;
   }
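
EmitCopy() now carries a num_blocks count, so a single call can describe a whole copy extent; the fake writer above simply unrolls it into per-block bookkeeping. A minimal sketch of that equivalence (FakeOp and the map are illustrative stand-ins):

#include <cstdint>
#include <map>

struct FakeOp {
  uint64_t source_block;
};

// A ranged copy of n blocks is equivalent to n consecutive 1-block copies.
void ExpandCopy(std::map<uint64_t, FakeOp>* ops,
                uint64_t new_block,
                uint64_t old_block,
                uint64_t num_blocks) {
  for (uint64_t i = 0; i < num_blocks; i++) {
    (*ops)[new_block + i] = FakeOp{old_block + i};
  }
}

int main() {
  std::map<uint64_t, FakeOp> ops;
  ExpandCopy(&ops, /*new_block=*/10, /*old_block=*/5, /*num_blocks=*/2);
  // ops[10].source_block == 5, ops[11].source_block == 6.
  return ops[10].source_block == 5 && ops[11].source_block == 6 ? 0 : 1;
}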
   bool EmitRawBlocks(uint64_t new_block_start,
diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc
index 8ae0b51..17b7d50 100644
--- a/payload_consumer/vabc_partition_writer.cc
+++ b/payload_consumer/vabc_partition_writer.cc
@@ -29,13 +29,9 @@
 
 #include "update_engine/common/cow_operation_convert.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/block_extent_writer.h"
 #include "update_engine/payload_consumer/extent_map.h"
-#include "update_engine/payload_consumer/extent_reader.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
-#include "update_engine/payload_consumer/file_descriptor_utils.h"
 #include "update_engine/payload_consumer/install_plan.h"
-#include "update_engine/payload_consumer/partition_writer.h"
 #include "update_engine/payload_consumer/snapshot_extent_writer.h"
 #include "update_engine/payload_consumer/xor_extent_writer.h"
 #include "update_engine/payload_generator/extent_ranges.h"
@@ -92,7 +88,50 @@
       dynamic_control_(dynamic_control),
       block_size_(block_size),
       executor_(block_size),
-      verified_source_fd_(block_size, install_part.source_path) {}
+      verified_source_fd_(block_size, install_part.source_path) {
+  for (const auto& cow_op : partition_update_.merge_operations()) {
+    if (cow_op.type() != CowMergeOperation::COW_COPY) {
+      continue;
+    }
+    copy_blocks_.AddExtent(cow_op.dst_extent());
+  }
+  LOG(INFO) << "Partition `" << partition_update.partition_name() << " has "
+            << copy_blocks_.blocks() << " copy blocks";
+}
+
+bool VABCPartitionWriter::DoesDeviceSupportXor() {
+  return dynamic_control_->GetVirtualAbCompressionXorFeatureFlag().IsEnabled();
+}
+
+bool VABCPartitionWriter::WriteAllCopyOps() {
+  const bool userSnapshots = android::base::GetBoolProperty(
+      "ro.virtual_ab.userspace.snapshots.enabled", false);
+  for (const auto& cow_op : partition_update_.merge_operations()) {
+    if (cow_op.type() != CowMergeOperation::COW_COPY) {
+      continue;
+    }
+    if (cow_op.dst_extent() == cow_op.src_extent()) {
+      continue;
+    }
+    if (userSnapshots) {
+      TEST_AND_RETURN_FALSE(cow_op.src_extent().num_blocks() != 0);
+      TEST_AND_RETURN_FALSE(
+          cow_writer_->AddCopy(cow_op.dst_extent().start_block(),
+                               cow_op.src_extent().start_block(),
+                               cow_op.src_extent().num_blocks()));
+    } else {
+      // Add blocks in reverse order, because snapuserd specifically prefers
+      // this ordering. Since we already eliminated all self-overlapping
+      // SOURCE_COPY during delta generation, this should be safe to do.
+      for (size_t i = cow_op.src_extent().num_blocks(); i > 0; i--) {
+        TEST_AND_RETURN_FALSE(
+            cow_writer_->AddCopy(cow_op.dst_extent().start_block() + i - 1,
+                                 cow_op.src_extent().start_block() + i - 1));
+      }
+    }
+  }
+  return true;
+}
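
When userspace snapshots are off, each copy extent is emitted back-to-front because snapuserd merges blocks in write order and prefers this reversed ordering within an extent. A stand-alone sketch of the reverse-order expansion (MiniWriter is a hypothetical stand-in for ICowWriter):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for ICowWriter::AddCopy(dst, src).
struct MiniWriter {
  void AddCopy(uint64_t dst, uint64_t src) {
    std::printf("copy dst=%lu src=%lu\n",
                static_cast<unsigned long>(dst),
                static_cast<unsigned long>(src));
  }
};

int main() {
  MiniWriter writer;
  // Extent src [15-16] -> dst [20-21]: emit 21<-16 before 20<-15.
  const uint64_t src_start = 15, dst_start = 20, num_blocks = 2;
  for (uint64_t i = num_blocks; i > 0; i--) {
    writer.AddCopy(dst_start + i - 1, src_start + i - 1);
  }
  return 0;
}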
 
 bool VABCPartitionWriter::Init(const InstallPlan* install_plan,
                                bool source_may_exist,
@@ -141,26 +180,22 @@
     if (IsXorEnabled()) {
       LOG(INFO) << "VABC XOR enabled for partition "
                 << partition_update_.partition_name();
+    }
+    // When merge sequence is present in COW, snapuserd will merge blocks in
+    // order specified by the merge sequence op. Hence we have the freedom of
+    // writing COPY operations out of order. Delay processing of copy ops so
+    // that update_engine can be more responsive in progress updates.
+    if (DoesDeviceSupportXor()) {
+      LOG(INFO) << "Snapuserd supports XOR and merge sequence, writing merge "
+                   "sequence and delaying COPY operations";
       TEST_AND_RETURN_FALSE(WriteMergeSequence(
           partition_update_.merge_operations(), cow_writer_.get()));
+    } else {
+      LOG(INFO) << "Snapuserd does not support merge sequence, writing all "
+                   "COPY operations up front, this may take few "
+                   "minutes.";
+      TEST_AND_RETURN_FALSE(WriteAllCopyOps());
     }
-  }
-
-  // TODO(zhangkelvin) Rewrite this in C++20 coroutine once that's available.
-  // TODO(177104308) Don't write all COPY ops up-front if merge sequence is
-  // written
-  const auto converted = ConvertToCowOperations(
-      partition_update_.operations(), partition_update_.merge_operations());
-
-  if (!converted.empty()) {
-    // Use source fd directly. Ideally we want to verify all extents used in
-    // source copy, but then what do we do if some extents contain correct
-    // hashes and some don't?
-    auto source_fd = std::make_shared<EintrSafeFileDescriptor>();
-    TEST_AND_RETURN_FALSE_ERRNO(
-        source_fd->Open(install_part_.source_path.c_str(), O_RDONLY));
-    TEST_AND_RETURN_FALSE(WriteSourceCopyCowOps(
-        block_size_, converted, cow_writer_.get(), source_fd));
     cow_writer_->AddLabel(0);
   }
   return true;
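
Init() now branches between two strategies: when the device supports XOR (and therefore merge-sequence ops), only the merge sequence is written up front and COPY ops are deferred; otherwise every COPY op is written before the label. A condensed sketch of that decision, with hypothetical stand-ins for the feature flag and the two write paths:

#include <cstdio>

// Hypothetical stand-ins for the feature flag and the two write paths.
bool DeviceSupportsXor() { return true; }
bool WriteMergeSequenceOnly() { std::puts("merge sequence written"); return true; }
bool WriteAllCopyOpsUpFront() { std::puts("all COPY ops written"); return true; }

bool InitCowOps() {
  if (DeviceSupportsXor()) {
    // snapuserd honors the merge-sequence op, so COPY ops can be deferred
    // and written lazily for more responsive progress reporting.
    return WriteMergeSequenceOnly();
  }
  // Legacy path: snapuserd merges in write order, so COPY ops go first.
  return WriteAllCopyOpsUpFront();
}

int main() { return InitCowOps() ? 0 : 1; }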
@@ -221,48 +256,6 @@
                                      blocks_merge_order.data());
 }
 
-bool VABCPartitionWriter::WriteSourceCopyCowOps(
-    size_t block_size,
-    const std::vector<CowOperation>& converted,
-    ICowWriter* cow_writer,
-    FileDescriptorPtr source_fd) {
-  for (const auto& cow_op : converted) {
-    std::vector<uint8_t> buffer;
-    switch (cow_op.op) {
-      case CowOperation::CowCopy:
-        if (cow_op.src_block == cow_op.dst_block) {
-          continue;
-        }
-        // Add blocks in reverse order, because snapused specifically prefers
-        // this ordering. Since we already eliminated all self-overlapping
-        // SOURCE_COPY during delta generation, this should be safe to do.
-        for (size_t i = cow_op.block_count; i > 0; i--) {
-          TEST_AND_RETURN_FALSE(cow_writer->AddCopy(cow_op.dst_block + i - 1,
-                                                    cow_op.src_block + i - 1));
-        }
-        break;
-      case CowOperation::CowReplace:
-        buffer.resize(block_size * cow_op.block_count);
-        ssize_t bytes_read = 0;
-        TEST_AND_RETURN_FALSE(utils::ReadAll(source_fd,
-                                             buffer.data(),
-                                             block_size * cow_op.block_count,
-                                             cow_op.src_block * block_size,
-                                             &bytes_read));
-        if (bytes_read <= 0 ||
-            static_cast<size_t>(bytes_read) != buffer.size()) {
-          LOG(ERROR) << "source_fd->Read failed: " << bytes_read;
-          return false;
-        }
-        TEST_AND_RETURN_FALSE(cow_writer->AddRawBlocks(
-            cow_op.dst_block, buffer.data(), buffer.size()));
-        break;
-    }
-  }
-
-  return true;
-}
-
 std::unique_ptr<ExtentWriter> VABCPartitionWriter::CreateBaseExtentWriter() {
   return std::make_unique<SnapshotExtentWriter>(cow_writer_.get());
 }
@@ -280,14 +273,68 @@
     const InstallOperation& operation, ErrorCode* error) {
   // COPY ops are already handled during Init(), no need to do actual work, but
   // we still want to verify that all blocks contain expected data.
-  auto source_fd = std::make_shared<EintrSafeFileDescriptor>();
-  TEST_AND_RETURN_FALSE_ERRNO(
-      source_fd->Open(install_part_.source_path.c_str(), O_RDONLY));
-  if (!operation.has_src_sha256_hash()) {
-    return true;
+  auto source_fd = verified_source_fd_.ChooseSourceFD(operation, error);
+  TEST_AND_RETURN_FALSE(source_fd != nullptr);
+  std::vector<CowOperation> converted;
+
+  const auto& src_extents = operation.src_extents();
+  const auto& dst_extents = operation.dst_extents();
+  BlockIterator it1{src_extents};
+  BlockIterator it2{dst_extents};
+  const bool userSnapshots = android::base::GetBoolProperty(
+      "ro.virtual_ab.userspace.snapshots.enabled", false);
+  // For devices not supporting XOR, sequence op is not supported, so all COPY
+  // operations are written up front in strict merge order.
+  const auto sequence_op_supported = DoesDeviceSupportXor();
+  while (!it1.is_end() && !it2.is_end()) {
+    const auto src_block = *it1;
+    const auto dst_block = *it2;
+    ++it1;
+    ++it2;
+    if (src_block == dst_block) {
+      continue;
+    }
+    if (copy_blocks_.ContainsBlock(dst_block)) {
+      if (sequence_op_supported) {
+        push_back(&converted, {CowOperation::CowCopy, src_block, dst_block, 1});
+      }
+    } else {
+      push_back(&converted,
+                {CowOperation::CowReplace, src_block, dst_block, 1});
+    }
   }
-  return PartitionWriter::ValidateSourceHash(
-      operation, source_fd, block_size_, error);
+  std::vector<uint8_t> buffer;
+  for (const auto& cow_op : converted) {
+    if (cow_op.op == CowOperation::CowCopy) {
+      if (userSnapshots) {
+        cow_writer_->AddCopy(
+            cow_op.dst_block, cow_op.src_block, cow_op.block_count);
+      } else {
+        // Add blocks in reverse order, because snapuserd specifically prefers
+        // this ordering. Since we already eliminated all self-overlapping
+        // SOURCE_COPY during delta generation, this should be safe to do.
+        for (size_t i = cow_op.block_count; i > 0; i--) {
+          TEST_AND_RETURN_FALSE(cow_writer_->AddCopy(cow_op.dst_block + i - 1,
+                                                     cow_op.src_block + i - 1));
+        }
+      }
+      continue;
+    }
+    buffer.resize(block_size_ * cow_op.block_count);
+    ssize_t bytes_read = 0;
+    TEST_AND_RETURN_FALSE(utils::ReadAll(source_fd,
+                                         buffer.data(),
+                                         block_size_ * cow_op.block_count,
+                                         cow_op.src_block * block_size_,
+                                         &bytes_read));
+    if (bytes_read <= 0 || static_cast<size_t>(bytes_read) != buffer.size()) {
+      LOG(ERROR) << "source_fd->Read failed: " << bytes_read;
+      return false;
+    }
+    TEST_AND_RETURN_FALSE(cow_writer_->AddRawBlocks(
+        cow_op.dst_block, buffer.data(), buffer.size()));
+  }
+  return true;
 }
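
The rewritten PerformSourceCopyOperation() walks the source and destination extents block-by-block in lockstep, skipping blocks that are already in place, re-emitting blocks covered by a COW_COPY merge op, and turning everything else into a replace read from the verified source. A simplified sketch of the classification loop (plain containers stand in for BlockIterator and ExtentRanges):

#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>

enum class Kind { kSkip, kCopy, kReplace };

int main() {
  // Hypothetical flattened block lists and merge-covered dst blocks.
  std::vector<uint64_t> src{5, 10, 15, 16};
  std::vector<uint64_t> dst{10, 15, 20, 21};
  std::set<uint64_t> copy_blocks{10, 15};  // dst blocks with COW_COPY merge ops
  for (size_t i = 0; i < src.size(); i++) {
    Kind kind = Kind::kReplace;
    if (src[i] == dst[i]) {
      kind = Kind::kSkip;  // block already in place
    } else if (copy_blocks.count(dst[i])) {
      kind = Kind::kCopy;  // handled by merge sequence / COPY op
    }
    std::printf("src=%llu dst=%llu kind=%d\n",
                static_cast<unsigned long long>(src[i]),
                static_cast<unsigned long long>(dst[i]),
                static_cast<int>(kind));
  }
  return 0;
}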
 
 bool VABCPartitionWriter::PerformReplaceOperation(const InstallOperation& op,
@@ -311,7 +358,11 @@
 
   std::unique_ptr<ExtentWriter> writer =
       IsXorEnabled() ? std::make_unique<XORExtentWriter>(
-                           operation, source_fd, cow_writer_.get(), xor_map_)
+                           operation,
+                           source_fd,
+                           cow_writer_.get(),
+                           xor_map_,
+                           partition_update_.old_partition_info().size())
                      : CreateBaseExtentWriter();
   return executor_.ExecuteDiffOperation(
       operation, std::move(writer), source_fd, data, count);
@@ -342,6 +393,8 @@
 
 int VABCPartitionWriter::Close() {
   if (cow_writer_) {
+    LOG(INFO) << "Finalizing " << partition_update_.partition_name()
+              << " COW image";
     cow_writer_->Finalize();
     cow_writer_ = nullptr;
   }
diff --git a/payload_consumer/vabc_partition_writer.h b/payload_consumer/vabc_partition_writer.h
index 4df5151..889f376 100644
--- a/payload_consumer/vabc_partition_writer.h
+++ b/payload_consumer/vabc_partition_writer.h
@@ -24,11 +24,11 @@
 
 #include <libsnapshot/snapshot_writer.h>
 
-#include "update_engine/common/cow_operation_convert.h"
 #include "update_engine/payload_consumer/extent_map.h"
 #include "update_engine/payload_consumer/install_operation_executor.h"
 #include "update_engine/payload_consumer/install_plan.h"
-#include "update_engine/payload_consumer/partition_writer.h"
+#include "update_engine/payload_consumer/partition_writer_interface.h"
+#include "update_engine/payload_consumer/verified_source_fd.h"
 #include "update_engine/payload_generator/extent_ranges.h"
 
 namespace chromeos_update_engine {
@@ -62,12 +62,6 @@
 
   void CheckpointUpdateProgress(size_t next_op_index) override;
 
-  [[nodiscard]] static bool WriteSourceCopyCowOps(
-      size_t block_size,
-      const std::vector<CowOperation>& converted,
-      android::snapshot::ICowWriter* cow_writer,
-      FileDescriptorPtr source_fd);
-
   [[nodiscard]] bool FinishedInstallOps() override;
   int Close() override;
   // Send merge sequence data to cow writer
@@ -76,7 +70,9 @@
       android::snapshot::ICowWriter* cow_writer);
 
  private:
+  [[nodiscard]] bool DoesDeviceSupportXor();
   bool IsXorEnabled() const noexcept { return xor_map_.size() > 0; }
+  [[nodiscard]] bool WriteAllCopyOps();
   std::unique_ptr<android::snapshot::ISnapshotWriter> cow_writer_;
 
   [[nodiscard]] std::unique_ptr<ExtentWriter> CreateBaseExtentWriter();
@@ -91,6 +87,7 @@
   InstallOperationExecutor executor_;
   VerifiedSourceFd verified_source_fd_;
   ExtentMap<const CowMergeOperation*, ExtentLess> xor_map_;
+  ExtentRanges copy_blocks_;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/vabc_partition_writer_unittest.cc b/payload_consumer/vabc_partition_writer_unittest.cc
index 20aa75f..3cdf3bc 100644
--- a/payload_consumer/vabc_partition_writer_unittest.cc
+++ b/payload_consumer/vabc_partition_writer_unittest.cc
@@ -24,11 +24,13 @@
 #include <libsnapshot/cow_writer.h>
 #include <libsnapshot/mock_snapshot_writer.h>
 
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/mock_dynamic_partition_control.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/vabc_partition_writer.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/extent_ranges.h"
 #include "update_engine/update_metadata.pb.h"
 
 namespace chromeos_update_engine {
@@ -55,6 +57,7 @@
   }
 
  protected:
+  void EmitBlockTest(bool xor_enabled);
   CowMergeOperation* AddMergeOp(PartitionUpdate* partition,
                                 std::array<size_t, 2> src_extent,
                                 std::array<size_t, 2> dst_extent,
@@ -102,7 +105,7 @@
         EXPECT_CALL(*cow_writer, EmitSequenceData(_, _))
             .With(Args<1, 0>(ElementsAreArray(expected_merge_sequence)))
             .WillOnce(Return(true));
-        ON_CALL(*cow_writer, EmitCopy(_, _)).WillByDefault(Return(true));
+        ON_CALL(*cow_writer, EmitCopy(_, _, _)).WillByDefault(Return(true));
         ON_CALL(*cow_writer, EmitLabel(_)).WillByDefault(Return(true));
         return cow_writer;
       }));
@@ -127,7 +130,7 @@
             EXPECT_CALL(*cow_writer, EmitSequenceData(_, _))
                 .With(Args<1, 0>(ElementsAreArray(expected_merge_sequence)))
                 .WillOnce(Return(true));
-            ON_CALL(*cow_writer, EmitCopy(_, _)).WillByDefault(Return(true));
+            ON_CALL(*cow_writer, EmitCopy(_, _, _)).WillByDefault(Return(true));
             ON_CALL(*cow_writer, EmitLabel(_)).WillByDefault(Return(true));
             return cow_writer;
           }));
@@ -136,7 +139,33 @@
   ASSERT_TRUE(writer_.Init(&install_plan_, true, 0));
 }
 
-TEST_F(VABCPartitionWriterTest, EmitBlockTest) {
+TEST_F(VABCPartitionWriterTest, EmitBlockTestXor) {
+  return EmitBlockTest(true);
+}
+
+TEST_F(VABCPartitionWriterTest, EmitBlockTestNoXor) {
+  return EmitBlockTest(false);
+}
+
+void VABCPartitionWriterTest::EmitBlockTest(bool xor_enabled) {
+  if (xor_enabled) {
+    ON_CALL(dynamic_control_, GetVirtualAbCompressionXorFeatureFlag())
+        .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::LAUNCH)));
+  } else {
+    ON_CALL(dynamic_control_, GetVirtualAbCompressionXorFeatureFlag())
+        .WillByDefault(Return(FeatureFlag(FeatureFlag::Value::NONE)));
+  }
+  InstallOperation& install_op = *partition_update_.add_operations();
+  install_op.set_type(InstallOperation::SOURCE_COPY);
+  *install_op.add_src_extents() = ExtentForRange(5, 1);
+  *install_op.add_src_extents() = ExtentForRange(10, 1);
+  *install_op.add_src_extents() = ExtentForRange(15, 2);
+  *install_op.add_src_extents() = ExtentForRange(20, 2);
+
+  *install_op.add_dst_extents() = ExtentForRange(10, 1);
+  *install_op.add_dst_extents() = ExtentForRange(15, 1);
+  *install_op.add_dst_extents() = ExtentForRange(20, 2);
+  *install_op.add_dst_extents() = ExtentForRange(25, 2);
   AddMergeOp(&partition_update_, {5, 1}, {10, 1}, CowMergeOperation::COW_COPY);
   AddMergeOp(&partition_update_, {10, 1}, {15, 1}, CowMergeOperation::COW_COPY);
   AddMergeOp(&partition_update_, {15, 2}, {20, 2}, CowMergeOperation::COW_COPY);
@@ -144,26 +173,45 @@
   VABCPartitionWriter writer_{
       partition_update_, install_part_, &dynamic_control_, kBlockSize};
   EXPECT_CALL(dynamic_control_, OpenCowWriter(fake_part_name, _, false))
-      .WillOnce(Invoke(
-          [](const std::string&, const std::optional<std::string>&, bool) {
-            auto cow_writer =
-                std::make_unique<android::snapshot::MockSnapshotWriter>(
-                    android::snapshot::CowOptions{});
-            Sequence s;
-            ON_CALL(*cow_writer, EmitCopy(_, _)).WillByDefault(Return(true));
-            ON_CALL(*cow_writer, EmitLabel(_)).WillByDefault(Return(true));
-            ON_CALL(*cow_writer, Initialize()).WillByDefault(Return(true));
-            EXPECT_CALL(*cow_writer, Initialize()).InSequence(s);
-            EXPECT_CALL(*cow_writer, EmitCopy(10, 5)).InSequence(s);
-            EXPECT_CALL(*cow_writer, EmitCopy(15, 10)).InSequence(s);
-            // libsnapshot want blocks in reverser order, so 21 goes before 20
-            EXPECT_CALL(*cow_writer, EmitCopy(21, 16)).InSequence(s);
-            EXPECT_CALL(*cow_writer, EmitCopy(20, 15)).InSequence(s);
+      .WillOnce(Invoke([xor_enabled](const std::string&,
+                                     const std::optional<std::string>&,
+                                     bool) {
+        auto cow_writer =
+            std::make_unique<android::snapshot::MockSnapshotWriter>(
+                android::snapshot::CowOptions{});
+        ON_CALL(*cow_writer, EmitCopy(_, _, _)).WillByDefault(Return(true));
+        ON_CALL(*cow_writer, EmitLabel(_)).WillByDefault(Return(true));
+        ON_CALL(*cow_writer, Initialize()).WillByDefault(Return(true));
+        EXPECT_CALL(*cow_writer, Initialize());
+        if (xor_enabled) {
+          EXPECT_CALL(*cow_writer, EmitSequenceData(_, _))
+              .WillOnce(Return(true));
+          EXPECT_CALL(*cow_writer, EmitCopy(10, 5, 1));
+          EXPECT_CALL(*cow_writer, EmitCopy(15, 10, 1));
+          // libsnapshot wants blocks in reverse order, so 21 goes before 20
+          EXPECT_CALL(*cow_writer, EmitCopy(20, 15, 2));
 
-            EXPECT_CALL(*cow_writer, EmitCopy(25, 20)).InSequence(s);
-            return cow_writer;
-          }));
+          EXPECT_CALL(*cow_writer, EmitCopy(25, 20, 1));
+          EXPECT_CALL(*cow_writer, EmitRawBlocks(26, _, 4096))
+              .WillOnce(Return(true));
+          EXPECT_CALL(*cow_writer, Finalize());
+        } else {
+          Sequence s;
+          EXPECT_CALL(*cow_writer, EmitCopy(10, 5, 1)).InSequence(s);
+          EXPECT_CALL(*cow_writer, EmitCopy(15, 10, 1)).InSequence(s);
+          // libsnapshot wants blocks in reverse order, so 21 goes before 20
+          EXPECT_CALL(*cow_writer, EmitCopy(20, 15, 2)).InSequence(s);
+
+          EXPECT_CALL(*cow_writer, EmitCopy(25, 20, 1)).InSequence(s);
+          EXPECT_CALL(*cow_writer, EmitRawBlocks(26, _, 4096))
+              .InSequence(s)
+              .WillOnce(Return(true));
+        }
+        return cow_writer;
+      }));
   ASSERT_TRUE(writer_.Init(&install_plan_, true, 0));
+  ErrorCode error{};
+  ASSERT_TRUE(writer_.PerformSourceCopyOperation(install_op, &error));
 }
 
 std::string GetNoopBSDIFF(size_t data_size) {
@@ -224,7 +272,7 @@
               .WillOnce(Return(true));
         }
         EXPECT_CALL(*cow_writer, Initialize()).Times(1);
-        EXPECT_CALL(*cow_writer, EmitCopy(_, _)).Times(0);
+        EXPECT_CALL(*cow_writer, EmitCopy(_, _, _)).Times(0);
         EXPECT_CALL(*cow_writer, EmitRawBlocks(_, _, _)).WillOnce(Return(true));
         EXPECT_CALL(*cow_writer, EmitXorBlocks(10, _, kBlockSize * 2, 5, 0))
             .WillOnce(Return(true));
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
index 91efa3e..4a476d2 100644
--- a/payload_consumer/verity_writer_android.cc
+++ b/payload_consumer/verity_writer_android.cc
@@ -20,6 +20,7 @@
 
 #include <algorithm>
 #include <memory>
+#include <utility>
 
 #include <base/logging.h>
 #include <base/posix/eintr_wrapper.h>
@@ -34,6 +35,128 @@
 
 namespace chromeos_update_engine {
 
+bool IncrementalEncodeFEC::Init(const uint64_t _data_offset,
+                                const uint64_t _data_size,
+                                const uint64_t _fec_offset,
+                                const uint64_t _fec_size,
+                                const uint64_t _fec_roots,
+                                const uint64_t _block_size,
+                                const bool _verify_mode) {
+  current_step_ = EncodeFECStep::kInitFDStep;
+  data_offset_ = _data_offset;
+  data_size_ = _data_size;
+  fec_offset_ = _fec_offset;
+  fec_size_ = _fec_size;
+  fec_roots_ = _fec_roots;
+  block_size_ = _block_size;
+  verify_mode_ = _verify_mode;
+  current_round_ = 0;
+  // This is the N in RS(M, N), which is the number of bytes for each rs block.
+  rs_n_ = FEC_RSM - fec_roots_;
+  rs_char_.reset(init_rs_char(FEC_PARAMS(fec_roots_)));
+  rs_blocks_.resize(block_size_ * rs_n_);
+  buffer_.resize(block_size_, 0);
+  fec_.resize(block_size_ * fec_roots_);
+  fec_read_.resize(fec_.size());
+  TEST_AND_RETURN_FALSE(data_size_ % block_size_ == 0);
+  // |fec_roots_| is unsigned, so only the upper bound needs checking.
+  TEST_AND_RETURN_FALSE(fec_roots_ < FEC_RSM);
+
+  num_rounds_ = utils::DivRoundUp(data_size_ / block_size_, rs_n_);
+  TEST_AND_RETURN_FALSE(num_rounds_ * fec_roots_ * block_size_ == fec_size_);
+  TEST_AND_RETURN_FALSE(rs_char_ != nullptr);
+  return true;
+}
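
The sizing checks in Init() follow from RS(M, N) with M = FEC_RSM = 255: each round protects rs_n_ = 255 - fec_roots_ data blocks and emits fec_roots_ * block_size_ parity bytes, so fec_size_ must equal num_rounds_ * fec_roots_ * block_size_. A worked check of that arithmetic under assumed sizes:

#include <cassert>
#include <cstdint>

int main() {
  // Assumed values: 4 KiB blocks, 2 parity roots, 1012 data blocks.
  const uint64_t kFecRsm = 255;  // RS(M, N) codeword size, M
  const uint64_t block_size = 4096;
  const uint64_t fec_roots = 2;
  const uint64_t data_blocks = 1012;
  const uint64_t rs_n = kFecRsm - fec_roots;  // 253 data symbols per codeword
  // DivRoundUp(1012, 253) == 4 rounds.
  const uint64_t num_rounds = (data_blocks + rs_n - 1) / rs_n;
  const uint64_t fec_size = num_rounds * fec_roots * block_size;
  assert(num_rounds == 4);
  assert(fec_size == 4 * 2 * 4096);  // 32 KiB of parity
  return 0;
}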
+
+bool IncrementalEncodeFEC::Compute(FileDescriptor* _read_fd,
+                                   FileDescriptor* _write_fd) {
+  if (current_step_ == EncodeFECStep::kInitFDStep) {
+    read_fd_ = _read_fd;
+    write_fd_ = _write_fd;
+    cache_fd_.SetFD(write_fd_);
+    write_fd_ = &cache_fd_;
+  } else if (current_step_ == EncodeFECStep::kEncodeRoundStep) {
+    // Encodes |block_size| number of rs blocks each round so that we can read
+    // one block each time instead of 1 byte to increase random read
+    // performance. This uses about 1 MiB memory for 4K block size.
+    for (size_t j = 0; j < rs_n_; j++) {
+      uint64_t offset = fec_ecc_interleave(
+          current_round_ * rs_n_ * block_size_ + j, rs_n_, num_rounds_);
+      // Don't read past |data_size|, treat them as 0.
+      if (offset >= data_size_) {
+        std::fill(buffer_.begin(), buffer_.end(), 0);
+      } else {
+        ssize_t bytes_read = 0;
+        TEST_AND_RETURN_FALSE(utils::PReadAll(read_fd_,
+                                              buffer_.data(),
+                                              buffer_.size(),
+                                              data_offset_ + offset,
+                                              &bytes_read));
+        TEST_AND_RETURN_FALSE(bytes_read >= 0);
+        TEST_AND_RETURN_FALSE(static_cast<size_t>(bytes_read) ==
+                              buffer_.size());
+      }
+      for (size_t k = 0; k < buffer_.size(); k++) {
+        rs_blocks_[k * rs_n_ + j] = buffer_[k];
+      }
+    }
+    for (size_t j = 0; j < block_size_; j++) {
+      // Encode [j * rs_n_ : (j + 1) * rs_n_) in |rs_blocks| and write
+      // |fec_roots| number of parity bytes to |j * fec_roots| in |fec|.
+      encode_rs_char(rs_char_.get(),
+                     rs_blocks_.data() + j * rs_n_,
+                     fec_.data() + j * fec_roots_);
+    }
+
+    if (verify_mode_) {
+      ssize_t bytes_read = 0;
+      TEST_AND_RETURN_FALSE(utils::PReadAll(read_fd_,
+                                            fec_read_.data(),
+                                            fec_read_.size(),
+                                            fec_offset_,
+                                            &bytes_read));
+      TEST_AND_RETURN_FALSE(bytes_read >= 0);
+      TEST_AND_RETURN_FALSE(static_cast<size_t>(bytes_read) ==
+                            fec_read_.size());
+      TEST_AND_RETURN_FALSE(fec_ == fec_read_);
+    } else {
+      CHECK(write_fd_);
+      write_fd_->Seek(fec_offset_, SEEK_SET);
+      if (!utils::WriteAll(write_fd_, fec_.data(), fec_.size())) {
+        PLOG(ERROR) << "EncodeFEC write() failed";
+        return false;
+      }
+    }
+    fec_offset_ += fec_.size();
+    current_round_++;
+  } else if (current_step_ == EncodeFECStep::kWriteStep) {
+    write_fd_->Flush();
+  }
+  UpdateState();
+  return true;
+}
+
+// Updates the current state of EncodeFEC. The steps can be subdivided further
+// if smaller units of work are needed.
+void IncrementalEncodeFEC::UpdateState() {
+  if (current_step_ == EncodeFECStep::kInitFDStep) {
+    current_step_ = EncodeFECStep::kEncodeRoundStep;
+  } else if (current_step_ == EncodeFECStep::kEncodeRoundStep &&
+             current_round_ == num_rounds_) {
+    current_step_ = EncodeFECStep::kWriteStep;
+  } else if (current_step_ == EncodeFECStep::kWriteStep) {
+    current_step_ = EncodeFECStep::kComplete;
+  }
+}
+
+bool IncrementalEncodeFEC::Finished() const {
+  return current_step_ == EncodeFECStep::kComplete;
+}
+
+double IncrementalEncodeFEC::ReportProgress() const {
+  if (num_rounds_ == 0) {
+    return 1.0;
+  }
+  return static_cast<double>(current_round_) / num_rounds_;
+}
+
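IncrementalEncodeFEC advances through a small state machine: kInitFDStep wires up the descriptors, kEncodeRoundStep runs once per encoding round, kWriteStep flushes, and kComplete ends the computation. Driving such a machine to completion looks roughly like the loop below (a self-contained sketch, not the real Compute()/UpdateState() pair):

#include <cstdio>

enum class Step { kInit, kEncodeRound, kWrite, kComplete };

int main() {
  Step step = Step::kInit;
  int round = 0;
  const int num_rounds = 3;
  // Each iteration performs one bounded unit of work, mirroring Compute().
  while (step != Step::kComplete) {
    switch (step) {
      case Step::kInit:
        std::puts("init fds");
        step = Step::kEncodeRound;
        break;
      case Step::kEncodeRound:
        std::printf("encode round %d/%d\n", ++round, num_rounds);
        if (round == num_rounds) step = Step::kWrite;
        break;
      case Step::kWrite:
        std::puts("flush parity");
        step = Step::kComplete;
        break;
      case Step::kComplete:
        break;
    }
  }
  return 0;
}
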
 namespace verity_writer {
 std::unique_ptr<VerityWriterInterface> CreateVerityWriter() {
   return std::make_unique<VerityWriterAndroid>();
@@ -42,7 +165,15 @@
 
 bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
   partition_ = &partition;
-
+  LOG(INFO) << "Initializing Incremental EncodeFEC";
+  TEST_AND_RETURN_FALSE(encodeFEC_.Init(partition_->fec_data_offset,
+                                        partition_->fec_data_size,
+                                        partition_->fec_offset,
+                                        partition_->fec_size,
+                                        partition_->fec_roots,
+                                        partition_->block_size,
+                                        false /* verify_mode */));
+  hash_tree_written_ = false;
   if (partition_->hash_tree_size != 0) {
     auto hash_function =
         HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);
@@ -103,7 +234,6 @@
 
   return true;
 }
-
 bool VerityWriterAndroid::Finalize(FileDescriptor* read_fd,
                                    FileDescriptor* write_fd) {
   const auto hash_tree_data_end =
@@ -145,6 +275,56 @@
   return true;
 }
 
+bool VerityWriterAndroid::IncrementalFinalize(FileDescriptor* read_fd,
+                                              FileDescriptor* write_fd) {
+  if (!hash_tree_written_) {
+    LOG(INFO) << "Completing prework in Finalize";
+    const auto hash_tree_data_end =
+        partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
+    if (total_offset_ < hash_tree_data_end) {
+      LOG(ERROR) << "Read up to " << total_offset_
+                 << " when we are expecting to read everything "
+                    "before "
+                 << hash_tree_data_end;
+      return false;
+    }
+    // All hash tree data blocks have been hashed; write the hash tree to disk.
+    LOG(INFO) << "Writing verity hash tree to "
+              << partition_->readonly_target_path;
+    if (hash_tree_builder_) {
+      TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
+      TEST_AND_RETURN_FALSE_ERRNO(
+          write_fd->Seek(partition_->hash_tree_offset, SEEK_SET));
+      auto success =
+          hash_tree_builder_->WriteHashTree([write_fd](auto data, auto size) {
+            return utils::WriteAll(write_fd, data, size);
+          });
+      // hashtree builder already prints error messages.
+      TEST_AND_RETURN_FALSE(success);
+      hash_tree_builder_.reset();
+    }
+    hash_tree_written_ = true;
+    if (partition_->fec_size != 0) {
+      LOG(INFO) << "Writing verity FEC to " << partition_->readonly_target_path;
+    }
+  }
+  if (partition_->fec_size != 0) {
+    TEST_AND_RETURN_FALSE(encodeFEC_.Compute(read_fd, write_fd));
+  }
+  return true;
+}
+
+bool VerityWriterAndroid::FECFinished() const {
+  return hash_tree_written_ &&
+         (encodeFEC_.Finished() || partition_->fec_size == 0);
+}
+
+double VerityWriterAndroid::GetProgress() {
+  return encodeFEC_.ReportProgress();
+}
+
 bool VerityWriterAndroid::EncodeFEC(FileDescriptor* read_fd,
                                     FileDescriptor* write_fd,
                                     uint64_t data_offset,
@@ -156,7 +336,8 @@
                                     bool verify_mode) {
   TEST_AND_RETURN_FALSE(data_size % block_size == 0);
   TEST_AND_RETURN_FALSE(fec_roots >= 0 && fec_roots < FEC_RSM);
-  // This is the N in RS(M, N), which is the number of bytes for each rs block.
+  // This is the N in RS(M, N), which is the number of bytes for each rs
+  // block.
   size_t rs_n = FEC_RSM - fec_roots;
   uint64_t rounds = utils::DivRoundUp(data_size / block_size, rs_n);
   TEST_AND_RETURN_FALSE(rounds * fec_roots * block_size == fec_size);
@@ -196,8 +377,8 @@
     }
     brillo::Blob fec(block_size * fec_roots);
     for (size_t j = 0; j < block_size; j++) {
-      // Encode [j * rs_n : (j + 1) * rs_n) in |rs_blocks| and write |fec_roots|
-      // number of parity bytes to |j * fec_roots| in |fec|.
+      // Encode [j * rs_n : (j + 1) * rs_n) in |rs_blocks| and write
+      // |fec_roots| number of parity bytes to |j * fec_roots| in |fec|.
       encode_rs_char(rs_char.get(),
                      rs_blocks.data() + j * rs_n,
                      fec.data() + j * fec_roots);
diff --git a/payload_consumer/verity_writer_android.h b/payload_consumer/verity_writer_android.h
index a6a4920..1aaafd5 100644
--- a/payload_consumer/verity_writer_android.h
+++ b/payload_consumer/verity_writer_android.h
@@ -21,11 +21,63 @@
 #include <string>
 
 #include <verity/hash_tree_builder.h>
+#include <base/logging.h>
+#include <base/posix/eintr_wrapper.h>
+#include <fec/ecc.h>
+extern "C" {
+#include <fec.h>
+}
 
 #include "payload_consumer/file_descriptor.h"
+#include "update_engine/payload_consumer/cached_file_descriptor.h"
 #include "update_engine/payload_consumer/verity_writer_interface.h"
 
 namespace chromeos_update_engine {
+enum class EncodeFECStep {
+  kInitFDStep,
+  kEncodeRoundStep,
+  kWriteStep,
+  kComplete
+};
+class IncrementalEncodeFEC {
+ public:
+  IncrementalEncodeFEC()
+      : rs_char_(nullptr, &free_rs_char), cache_fd_(nullptr, 1 * (1 << 20)) {}
+  // Initializes all member variables needed to perform the FEC computation.
+  bool Init(const uint64_t _data_offset,
+            const uint64_t _data_size,
+            const uint64_t _fec_offset,
+            const uint64_t _fec_size,
+            const uint64_t _fec_roots,
+            const uint64_t _block_size,
+            const bool _verify_mode);
+  bool Compute(FileDescriptor* _read_fd, FileDescriptor* _write_fd);
+  void UpdateState();
+  bool Finished() const;
+  void Reset();
+  double ReportProgress() const;
+
+ private:
+  brillo::Blob rs_blocks_;
+  brillo::Blob buffer_;
+  brillo::Blob fec_;
+  brillo::Blob fec_read_;
+  EncodeFECStep current_step_;
+  size_t current_round_;
+  size_t num_rounds_;
+  FileDescriptor* read_fd_;
+  FileDescriptor* write_fd_;
+  uint64_t data_offset_;
+  uint64_t data_size_;
+  uint64_t fec_offset_;
+  uint64_t fec_size_;
+  uint64_t fec_roots_;
+  uint64_t block_size_;
+  size_t rs_n_;
+  bool verify_mode_;
+  std::unique_ptr<void, decltype(&free_rs_char)> rs_char_;
+  UnownedCachedFileDescriptor cache_fd_;
+};
 
 class VerityWriterAndroid : public VerityWriterInterface {
  public:
@@ -35,7 +87,10 @@
   bool Init(const InstallPlan::Partition& partition);
   bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
   bool Finalize(FileDescriptor* read_fd, FileDescriptor* write_fd) override;
-
+  bool IncrementalFinalize(FileDescriptor* read_fd,
+                           FileDescriptor* write_fd) override;
+  double GetProgress() override;
+  bool FECFinished() const override;
   // Read [data_offset : data_offset + data_size) from |path| and encode FEC
   // data, if |verify_mode|, then compare the encoded FEC with the one in
   // |path|, otherwise write the encoded FEC to |path|. We can't encode as we go
@@ -61,6 +116,9 @@
                         bool verify_mode);
 
  private:
+  // stores the state of EncodeFEC
+  IncrementalEncodeFEC encodeFEC_;
+  bool hash_tree_written_ = false;
   const InstallPlan::Partition* partition_ = nullptr;
 
   std::unique_ptr<HashTreeBuilder> hash_tree_builder_;
diff --git a/payload_consumer/verity_writer_interface.h b/payload_consumer/verity_writer_interface.h
index 432ede7..3ebe768 100644
--- a/payload_consumer/verity_writer_interface.h
+++ b/payload_consumer/verity_writer_interface.h
@@ -22,6 +22,7 @@
 
 #include <base/macros.h>
 
+#include "common/utils.h"
 #include "payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/install_plan.h"
 
@@ -38,8 +39,24 @@
   // blocks has passed.
   virtual bool Update(uint64_t offset, const uint8_t* buffer, size_t size) = 0;
 
+  // Deprecated: use IncrementalFinalize() instead, which allows verity writes
+  // to be interrupted. Kept for backwards compatibility.
+  virtual bool Finalize(FileDescriptor* read_fd, FileDescriptor* write_fd) {
+    while (!FECFinished()) {
+      TEST_AND_RETURN_FALSE(IncrementalFinalize(read_fd, write_fd));
+    }
+    return true;
+  }
+
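The old one-shot Finalize() survives as a default method that just drives the incremental interface to completion, so existing callers need no changes. The same template-method pattern in isolation (class names illustrative):

#include <cstdio>

class Incremental {
 public:
  virtual ~Incremental() = default;
  // Back-compat shim: loop the incremental step until done.
  virtual bool Finalize() {
    while (!Finished()) {
      if (!Step()) return false;
    }
    return true;
  }
  virtual bool Step() = 0;
  virtual bool Finished() const = 0;
};

class ThreeSteps : public Incremental {
 public:
  bool Step() override {
    std::printf("step %d\n", ++n_);
    return true;
  }
  bool Finished() const override { return n_ >= 3; }

 private:
  int n_ = 0;
};

int main() {
  ThreeSteps s;
  return s.Finalize() ? 0 : 1;  // prints step 1..3
}
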
   // Write hash tree && FEC data to underlying fd, if they are present
-  virtual bool Finalize(FileDescriptor* read_fd, FileDescriptor* write_fd) = 0;
+  virtual bool IncrementalFinalize(FileDescriptor* read_fd,
+                                   FileDescriptor* write_fd) = 0;
+
+  // Returns true once FEC data is finished writing
+  virtual bool FECFinished() const = 0;
+
+  // Gets progress report on FEC write
+  virtual double GetProgress() = 0;
 
  protected:
   VerityWriterInterface() = default;
diff --git a/payload_consumer/xor_extent_writer.cc b/payload_consumer/xor_extent_writer.cc
index 31567f2..4534c05 100644
--- a/payload_consumer/xor_extent_writer.cc
+++ b/payload_consumer/xor_extent_writer.cc
@@ -48,21 +48,39 @@
     const auto merge_op = merge_op_opt.value();
     TEST_AND_RETURN_FALSE(merge_op->has_src_extent());
     TEST_AND_RETURN_FALSE(merge_op->has_dst_extent());
-    if (merge_op->dst_extent() != xor_ext) {
-      LOG(ERROR) << "Each xor extent is expected to correspond to a complete "
-                    "MergeOp, extent in value: "
-                 << merge_op->dst_extent() << " extent in key: " << xor_ext;
-      return false;
-    }
-    if (xor_ext.start_block() + xor_ext.num_blocks() >
-        extent.start_block() + extent.num_blocks()) {
+    if (!ExtentContains(extent, xor_ext)) {
       LOG(ERROR) << "CowXor merge op extent should be completely inside "
                     "InstallOp's extent. merge op extent: "
                  << xor_ext << " InstallOp extent: " << extent;
       return false;
     }
+    if (!ExtentContains(merge_op->dst_extent(), xor_ext)) {
+      LOG(ERROR) << "CowXor op extent should be completely inside "
+                    "xor_map's extent. merge op extent: "
+                 << xor_ext << " xor_map extent: " << merge_op->dst_extent();
+      return false;
+    }
     const auto src_offset = merge_op->src_offset();
-    const auto src_block = merge_op->src_extent().start_block();
+    const auto src_block = merge_op->src_extent().start_block() +
+                           xor_ext.start_block() -
+                           merge_op->dst_extent().start_block();
+    const auto i = xor_ext.start_block() - extent.start_block();
+    const auto dst_block_data =
+        static_cast<const unsigned char*>(bytes) + i * BlockSize();
+    const auto is_out_of_bound_read =
+        (src_block + xor_ext.num_blocks()) * BlockSize() + src_offset >
+            partition_size_ &&
+        partition_size_ != 0;
+    if (is_out_of_bound_read) {
+      LOG(INFO) << "Getting partial read for last block, converting "
+                   "XOR operation to a regular replace "
+                << xor_ext;
+      TEST_AND_RETURN_FALSE(
+          cow_writer_->AddRawBlocks(xor_ext.start_block(),
+                                    dst_block_data,
+                                    xor_ext.num_blocks() * BlockSize()));
+      continue;
+    }
     xor_block_data.resize(BlockSize() * xor_ext.num_blocks());
     ssize_t bytes_read = 0;
     TEST_AND_RETURN_FALSE_ERRNO(
@@ -72,14 +90,12 @@
                         src_offset + src_block * BlockSize(),
                         &bytes_read));
     if (bytes_read != static_cast<ssize_t>(xor_block_data.size())) {
-      LOG(ERROR) << "bytes_read: " << bytes_read;
+      LOG(ERROR) << "bytes_read: " << bytes_read << ", expected to read "
+                 << xor_block_data.size() << " at block " << src_block
+                 << " offset " << src_offset;
       return false;
     }
 
-    const auto i = xor_ext.start_block() - extent.start_block();
-
-    const auto dst_block_data =
-        static_cast<const unsigned char*>(bytes) + i * BlockSize();
     std::transform(xor_block_data.cbegin(),
                    xor_block_data.cbegin() + xor_block_data.size(),
                    dst_block_data,
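
The new guard in xor_extent_writer.cc covers source extents whose XOR read, shifted by src_offset, would run past the end of the old partition; those extents are downgraded from XOR to plain replace writes. The boundary arithmetic checked in isolation, with assumed sizes:

#include <cassert>
#include <cstdint>

int main() {
  // Assumed: 10-block source partition, 4 KiB blocks.
  const uint64_t block_size = 4096;
  const uint64_t partition_size = 10 * block_size;
  // XOR op reading 1 block starting at the last block, with a 100-byte
  // intra-block offset: the read would end 100 bytes past the partition.
  const uint64_t src_block = 9, num_blocks = 1, src_offset = 100;
  const bool out_of_bounds =
      (src_block + num_blocks) * block_size + src_offset > partition_size &&
      partition_size != 0;
  assert(out_of_bounds);  // fall back to AddRawBlocks() for this extent
  return 0;
}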
diff --git a/payload_consumer/xor_extent_writer.h b/payload_consumer/xor_extent_writer.h
index 35565ea..57c99c2 100644
--- a/payload_consumer/xor_extent_writer.h
+++ b/payload_consumer/xor_extent_writer.h
@@ -19,12 +19,9 @@
 
 #include <vector>
 
+#include "common/utils.h"
 #include "update_engine/payload_consumer/block_extent_writer.h"
 #include "update_engine/payload_consumer/extent_map.h"
-#include "update_engine/payload_consumer/extent_reader.h"
-#include "update_engine/payload_consumer/extent_writer.h"
-#include "update_engine/payload_generator/extent_ranges.h"
-#include "update_engine/payload_generator/extent_utils.h"
 
 #include <update_engine/update_metadata.pb.h>
 #include <libsnapshot/cow_writer.h>
@@ -38,11 +35,13 @@
   XORExtentWriter(const InstallOperation& op,
                   FileDescriptorPtr source_fd,
                   android::snapshot::ICowWriter* cow_writer,
-                  const ExtentMap<const CowMergeOperation*>& xor_map)
+                  const ExtentMap<const CowMergeOperation*>& xor_map,
+                  size_t partition_size)
       : src_extents_(op.src_extents()),
         source_fd_(source_fd),
         xor_map_(xor_map),
-        cow_writer_(cow_writer) {
+        cow_writer_(cow_writer),
+        partition_size_(partition_size) {
     CHECK(source_fd->IsOpen());
   }
   ~XORExtentWriter() = default;
@@ -61,6 +60,7 @@
   const FileDescriptorPtr source_fd_;
   const ExtentMap<const CowMergeOperation*>& xor_map_;
   android::snapshot::ICowWriter* cow_writer_;
+  const size_t partition_size_;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/xor_extent_writer_unittest.cc b/payload_consumer/xor_extent_writer_unittest.cc
index 7f35bc2..55c3c6c 100644
--- a/payload_consumer/xor_extent_writer_unittest.cc
+++ b/payload_consumer/xor_extent_writer_unittest.cc
@@ -94,7 +94,8 @@
   ASSERT_TRUE(xor_map_.AddExtent(op3.dst_extent(), &op3));
   *op_.add_src_extents() = ExtentForRange(12, 4);
   *op_.add_dst_extents() = ExtentForRange(320, 4);
-  XORExtentWriter writer_{op_, source_fd_, &cow_writer_, xor_map_};
+  XORExtentWriter writer_{
+      op_, source_fd_, &cow_writer_, xor_map_, NUM_BLOCKS * kBlockSize};
 
   // OTA op:
   // [5-6] => [5-6], [45-47] => [455-457], [12-15] => [320-323]
@@ -131,4 +132,97 @@
   ASSERT_TRUE(writer_.Write(zeros->data(), 9 * kBlockSize));
 }
 
+TEST_F(XorExtentWriterTest, SubsetExtentTest) {
+  constexpr auto COW_XOR = CowMergeOperation::COW_XOR;
+  ON_CALL(cow_writer_, EmitXorBlocks(_, _, _, _, _))
+      .WillByDefault(Return(true));
+
+  const auto op3 = CreateCowMergeOperation(
+      ExtentForRange(12, 4), ExtentForRange(320, 4), COW_XOR, 777);
+  ASSERT_TRUE(xor_map_.AddExtent(op3.dst_extent(), &op3));
+
+  *op_.add_src_extents() = ExtentForRange(12, 3);
+  *op_.add_dst_extents() = ExtentForRange(320, 3);
+  *op_.add_src_extents() = ExtentForRange(20, 3);
+  *op_.add_dst_extents() = ExtentForRange(420, 3);
+  *op_.add_src_extents() = ExtentForRange(15, 1);
+  *op_.add_dst_extents() = ExtentForRange(323, 1);
+  XORExtentWriter writer_{
+      op_, source_fd_, &cow_writer_, xor_map_, NUM_BLOCKS * kBlockSize};
+
+  // OTA op:
+  // [12-14] => [320-322], [20-22] => [420-422], [15] => [323]
+
+  // merge op:
+  // [12-15] => [320-323]
+
+  // Expected result:
+  // [320-323] should be XOR blocks
+  // [420-422] should be regular replace blocks
+
+  auto zeros = utils::GetReadonlyZeroBlock(kBlockSize * 7);
+  EXPECT_CALL(
+      cow_writer_,
+      EmitRawBlocks(420, zeros->data() + 3 * kBlockSize, kBlockSize * 3))
+      .WillOnce(Return(true));
+
+  EXPECT_CALL(cow_writer_, EmitXorBlocks(320, _, kBlockSize * 3, 12, 777))
+      .WillOnce(Return(true));
+  EXPECT_CALL(cow_writer_, EmitXorBlocks(323, _, kBlockSize, 15, 777))
+      .WillOnce(Return(true));
+
+  ASSERT_TRUE(writer_.Init(op_.dst_extents(), kBlockSize));
+  ASSERT_TRUE(writer_.Write(zeros->data(), zeros->size()));
+}
+
+TEST_F(XorExtentWriterTest, LastBlockTest) {
+  constexpr auto COW_XOR = CowMergeOperation::COW_XOR;
+  ON_CALL(cow_writer_, EmitXorBlocks(_, _, _, _, _))
+      .WillByDefault(Return(true));
+
+  const auto op3 = CreateCowMergeOperation(
+      ExtentForRange(NUM_BLOCKS - 1, 1), ExtentForRange(2, 1), COW_XOR, 777);
+  ASSERT_TRUE(xor_map_.AddExtent(op3.dst_extent(), &op3));
+
+  *op_.add_src_extents() = ExtentForRange(12, 3);
+  *op_.add_dst_extents() = ExtentForRange(320, 3);
+
+  *op_.add_src_extents() = ExtentForRange(20, 3);
+  *op_.add_dst_extents() = ExtentForRange(420, 3);
+
+  *op_.add_src_extents() = ExtentForRange(NUM_BLOCKS - 3, 3);
+  *op_.add_dst_extents() = ExtentForRange(2, 3);
+  XORExtentWriter writer_{
+      op_, source_fd_, &cow_writer_, xor_map_, NUM_BLOCKS * kBlockSize};
+
+  // OTA op:
+  // [12-14] => [320-322], [20-22] => [420-422],
+  // [NUM_BLOCKS-3 .. NUM_BLOCKS-1] => [2-4]
+
+  // merge op:
+  // [NUM_BLOCKS-1] => [2]
+
+  // Expected result:
+  // [320-322] should be REPLACE blocks
+  // [420-422] should be REPLACE blocks
+  // [2-4] should be REPLACE blocks
+
+  auto zeros = utils::GetReadonlyZeroBlock(kBlockSize * 9);
+  EXPECT_CALL(cow_writer_, EmitRawBlocks(320, zeros->data(), kBlockSize * 3))
+      .WillOnce(Return(true));
+  EXPECT_CALL(
+      cow_writer_,
+      EmitRawBlocks(420, zeros->data() + 3 * kBlockSize, kBlockSize * 3))
+      .WillOnce(Return(true));
+
+  EXPECT_CALL(cow_writer_,
+              EmitRawBlocks(2, zeros->data() + 6 * kBlockSize, kBlockSize))
+      .WillOnce(Return(true));
+  EXPECT_CALL(cow_writer_,
+              EmitRawBlocks(3, zeros->data() + 7 * kBlockSize, kBlockSize * 2))
+      .WillOnce(Return(true));
+
+  ASSERT_TRUE(writer_.Init(op_.dst_extents(), kBlockSize));
+  ASSERT_TRUE(writer_.Write(zeros->data(), zeros->size()));
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/ab_generator.cc b/payload_generator/ab_generator.cc
index 25cafe3..570ce45 100644
--- a/payload_generator/ab_generator.cc
+++ b/payload_generator/ab_generator.cc
@@ -25,7 +25,6 @@
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/annotated_operation.h"
-#include "update_engine/payload_generator/bzip.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/delta_diff_utils.h"
 
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index 84eeb77..e00e6bd 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -65,9 +65,9 @@
     test_utils::FillWithData(&part_data);
   } else {
     std::mt19937 gen(12345);
-    std::uniform_int_distribution<uint8_t> dis(0, 255);
+    std::uniform_int_distribution<uint16_t> dis(0, 255);
     for (uint32_t i = 0; i < part_size; i++)
-      part_data.push_back(dis(gen));
+      part_data.push_back(static_cast<uint8_t>(dis(gen)));
   }
   ASSERT_EQ(part_size, part_data.size());
   ScopedTempFile part_file("SplitReplaceOrReplaceXzTest_part.XXXXXX");
@@ -213,9 +213,9 @@
     test_utils::FillWithData(&part_data);
   } else {
     std::mt19937 gen(12345);
-    std::uniform_int_distribution<uint8_t> dis(0, 255);
+    std::uniform_int_distribution<uint16_t> dis(0, 255);
     for (uint32_t i = 0; i < part_size; i++)
-      part_data.push_back(dis(gen));
+      part_data.push_back(static_cast<uint8_t>(dis(gen)));
   }
   ASSERT_EQ(part_size, part_data.size());
   ScopedTempFile part_file("MergeReplaceOrReplaceXzTest_part.XXXXXX");
diff --git a/payload_generator/cow_size_estimator.cc b/payload_generator/cow_size_estimator.cc
index 3a23f44..0c918e7 100644
--- a/payload_generator/cow_size_estimator.cc
+++ b/payload_generator/cow_size_estimator.cc
@@ -52,10 +52,9 @@
   for (const auto& op : merge_operations) {
     if (op.type() == CowMergeOperation::COW_COPY) {
       visited.AddExtent(op.dst_extent());
-      for (size_t i = 0; i < op.dst_extent().num_blocks(); i++) {
-        cow_writer->AddCopy(op.dst_extent().start_block() + i,
-                            op.src_extent().start_block() + i);
-      }
+      cow_writer->AddCopy(op.dst_extent().start_block(),
+                          op.src_extent().start_block(),
+                          op.dst_extent().num_blocks());
     } else if (op.type() == CowMergeOperation::COW_XOR && xor_enabled) {
       CHECK_NE(source_fd, nullptr) << "Source fd is required to enable XOR ops";
       CHECK(source_fd->IsOpen());
@@ -106,14 +105,15 @@
     cow_writer->AddLabel(0);
   }
   for (const auto& op : operations) {
+    cow_writer->AddLabel(0);
     if (op.type() == InstallOperation::ZERO) {
       for (const auto& ext : op.dst_extents()) {
         visited.AddExtent(ext);
         cow_writer->AddZeroBlocks(ext.start_block(), ext.num_blocks());
       }
-      cow_writer->AddLabel(0);
     }
   }
+  cow_writer->AddLabel(0);
   const size_t last_block = partition_size / block_size;
   const auto unvisited_extents =
       FilterExtentRanges({ExtentForRange(0, last_block)}, visited);
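
The first hunk above collapses the per-block AddCopy loop into a single ranged
call. A minimal sketch of the equivalence, assuming a cow_writer with the same
two overloads used in the hunk (values illustrative):

    // Old form: one virtual call per block of the extent.
    for (size_t i = 0; i < op.dst_extent().num_blocks(); i++) {
      cow_writer->AddCopy(op.dst_extent().start_block() + i,
                          op.src_extent().start_block() + i);
    }
    // New form: one call covering the whole extent; the writer can emit the
    // COW copy ops in bulk instead of being re-entered per block.
    cow_writer->AddCopy(op.dst_extent().start_block(),
                        op.src_extent().start_block(),
                        op.dst_extent().num_blocks());
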
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 389cf97..152da4d 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -57,8 +57,8 @@
 #include <zucchini/zucchini.h>
 
 #include "update_engine/common/hash_calculator.h"
-#include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
+#include "update_engine/lz4diff/lz4diff.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/ab_generator.h"
 #include "update_engine/payload_generator/block_mapping.h"
@@ -67,10 +67,7 @@
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/extent_ranges.h"
 #include "update_engine/payload_generator/extent_utils.h"
-#include "update_engine/payload_generator/merge_sequence_generator.h"
-#include "update_engine/payload_generator/squashfs_filesystem.h"
 #include "update_engine/payload_generator/xz.h"
-#include "update_engine/lz4diff/lz4diff.h"
 
 using std::list;
 using std::map;
@@ -220,7 +217,7 @@
       config_.OperationEnabled(InstallOperation::LZ4DIFF_BSDIFF) &&
       config_.OperationEnabled(InstallOperation::LZ4DIFF_PUFFDIFF)) {
     brillo::Blob patch;
-    InstallOperation::Type op_type;
+    InstallOperation::Type op_type{};
     if (Lz4Diff(old_data_,
                 new_data_,
                 old_block_info_,
@@ -680,6 +677,10 @@
 
   size_t max_threads = GetMaxThreads();
 
+  if (config.max_threads > 0) {
+    max_threads = config.max_threads;
+  }
+
   // Sort the files in descending order based on number of new blocks to make
   // sure we start the largest ones first.
   if (file_delta_processors.size() > max_threads) {
@@ -1071,7 +1072,7 @@
 
   // Try generating a full operation for the given new data, regardless of the
   // old_data.
-  InstallOperation::Type op_type;
+  InstallOperation::Type op_type{};
   TEST_AND_RETURN_FALSE(
       GenerateBestFullOperation(new_data, version, &data_blob, &op_type));
   operation.set_type(op_type);
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index dcb6867..bfbcdf7 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -25,8 +25,9 @@
 #include <brillo/secure_blob.h>
 #include <puffin/puffdiff.h>
 
-#include "payload_generator/deflate_utils.h"
+#include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/annotated_operation.h"
+#include "update_engine/payload_generator/deflate_utils.h"
 #include "update_engine/payload_generator/extent_ranges.h"
 #include "update_engine/payload_generator/payload_generation_config.h"
 #include "update_engine/update_metadata.pb.h"
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index 7348d74..b698339 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -197,9 +197,9 @@
   // Make a blob with random data that won't compress well.
   brillo::Blob random_data;
   std::mt19937 gen(12345);
-  std::uniform_int_distribution<uint8_t> dis(0, 255);
+  std::uniform_int_distribution<uint16_t> dis(0, 255);
   for (uint32_t i = 0; i < kBlockSize; i++) {
-    random_data.push_back(dis(gen));
+    random_data.push_back(static_cast<uint8_t>(dis(gen)));
   }
 
   for (int i = 0; i < 2; i++) {
diff --git a/payload_generator/erofs_filesystem.cc b/payload_generator/erofs_filesystem.cc
index 677b473..bf10d8c 100644
--- a/payload_generator/erofs_filesystem.cc
+++ b/payload_generator/erofs_filesystem.cc
@@ -76,28 +76,39 @@
          block.m_algorithmformat != Z_EROFS_COMPRESSION_SHIFTED;
 }
 
-static void FillCompressedBlockInfo(FilesystemInterface::File* p_file,
-                                    std::string_view image_filename,
-                                    struct erofs_inode* inode) {
+static void FillExtentInfo(FilesystemInterface::File* p_file,
+                           std::string_view image_filename,
+                           struct erofs_inode* inode) {
   auto& file = *p_file;
-  if (!file.is_compressed) {
-    return;
-  }
 
   struct erofs_map_blocks block {};
   block.m_la = 0;
   block.index = UINT_MAX;
 
-  const erofs_off_t uncompressed_size = file.file_stat.st_size;
   auto& compressed_blocks = file.compressed_file_info.blocks;
   auto last_pa = block.m_pa;
   auto last_plen = 0;
-  while (block.m_la < uncompressed_size) {
+  LOG(INFO) << file.name << ", isize: " << inode->i_size;
+  while (block.m_la < inode->i_size) {
     auto error = ErofsMapBlocks(inode, &block, EROFS_GET_BLOCKS_FIEMAP);
     if (error) {
       LOG(FATAL) << "Failed to map blocks for " << file.name << " in "
                  << image_filename;
     }
+    if (block.m_pa % kBlockSize != 0) {
+      // EROFS might put the last block at an unaligned address, because the
+      // last block is often smaller than a full block. That is fine; we can
+      // usually tolerate a small amount of unaligned data.
+      if (block.m_llen >= kBlockSize ||
+          block.m_la + block.m_llen != inode->i_size) {
+        LOG(ERROR) << "File `" << file.name
+                   << "` has unaligned blocks: at physical byte offset: "
+                   << block.m_pa
+                   << ", length: " << block.m_plen
+                   << ", logical offset: " << block.m_la;
+      }
+      break;
+    }
     // Certain uncompressed blocks have physical size > logical size. Usually
     // the physical block contains a bunch of trailing zeros. Include these
     // bytes in the logical size as well.
@@ -116,16 +127,18 @@
     } else {
       last_plen += block.m_plen;
     }
-    // If logical size and physical size are the same, this block is
-    // uncompressed. Join consecutive uncompressed blocks to save a bit memory
-    // storing metadata.
-    if (block.m_llen == block.m_plen && !compressed_blocks.empty() &&
-        !compressed_blocks.back().IsCompressed()) {
-      compressed_blocks.back().compressed_length += block.m_llen;
-      compressed_blocks.back().uncompressed_length += block.m_llen;
-    } else {
-      compressed_blocks.push_back(
-          CompressedBlock(block.m_la, block.m_plen, block.m_llen));
+    if (file.is_compressed) {
+      // If logical size and physical size are the same, this block is
+      // uncompressed. Join consecutive uncompressed blocks to save a bit memory
+      // storing metadata.
+      if (block.m_llen == block.m_plen && !compressed_blocks.empty() &&
+          !compressed_blocks.back().IsCompressed()) {
+        compressed_blocks.back().compressed_length += block.m_llen;
+        compressed_blocks.back().uncompressed_length += block.m_llen;
+      } else {
+        compressed_blocks.push_back(
+            CompressedBlock(block.m_la, block.m_plen, block.m_llen));
+      }
     }
 
     block.m_la += block.m_llen;
@@ -154,24 +167,28 @@
     PLOG(INFO) << "Failed to open " << filename;
     return nullptr;
   }
-  DEFER { dev_close(); };
+  DEFER {
+    dev_close();
+  };
 
   if (const auto err = erofs_read_superblock(); err) {
     PLOG(INFO) << "Failed to parse " << filename << " as EROFS image";
     return nullptr;
   }
-  struct stat st;
+  struct stat st {};
   if (const auto err = fstat(erofs_devfd, &st); err) {
     PLOG(ERROR) << "Failed to stat() " << filename;
     return nullptr;
   }
   const time_t time = sbi.build_time;
-  LOG(INFO) << "Parsed EROFS image of size " << st.st_size << " built in "
-            << ctime(&time) << " " << filename;
   std::vector<File> files;
   if (!ErofsFilesystem::GetFiles(filename, &files, algo)) {
     return nullptr;
   }
+
+  LOG(INFO) << "Parsed EROFS image of size " << st.st_size << " built in "
+            << ctime(&time) << " " << filename
+            << ", number of files: " << files.size();
   LOG(INFO) << "Using compression algo " << algo << " for " << filename;
   // private ctor doesn't work with make_unique
   return std::unique_ptr<ErofsFilesystem>(
@@ -191,7 +208,7 @@
     if (info.ctx.de_ftype != EROFS_FT_REG_FILE) {
       return 0;
     }
-    struct erofs_inode inode;
+    struct erofs_inode inode {};
     inode.nid = info.ctx.de_nid;
     int err = erofs_read_inode_from_disk(&inode);
     if (err) {
@@ -225,7 +242,7 @@
 
     file.file_stat.st_size = uncompressed_size;
     file.file_stat.st_ino = inode.nid;
-    FillCompressedBlockInfo(&file, filename, &inode);
+    FillExtentInfo(&file, filename, &inode);
     file.compressed_file_info.algo = algo;
 
     files->emplace_back(std::move(file));
diff --git a/payload_generator/extent_utils.h b/payload_generator/extent_utils.h
index bd9fc8b..f8d36e7 100644
--- a/payload_generator/extent_utils.h
+++ b/payload_generator/extent_utils.h
@@ -105,25 +105,25 @@
 // }
 struct BlockIterator {
   explicit BlockIterator(
-      const google::protobuf::RepeatedPtrField<Extent>& src_extents)
-      : src_extents_(src_extents) {}
+      const google::protobuf::RepeatedPtrField<Extent>& extents)
+      : extents_(extents) {}
 
   BlockIterator& operator++() {
-    CHECK_LT(cur_extent_, src_extents_.size());
+    CHECK_LT(cur_extent_, extents_.size());
     block_offset_++;
-    if (block_offset_ >= src_extents_[cur_extent_].num_blocks()) {
+    if (block_offset_ >= extents_[cur_extent_].num_blocks()) {
       cur_extent_++;
       block_offset_ = 0;
     }
     return *this;
   }
 
-  [[nodiscard]] bool is_end() { return cur_extent_ >= src_extents_.size(); }
+  [[nodiscard]] bool is_end() { return cur_extent_ >= extents_.size(); }
   [[nodiscard]] uint64_t operator*() {
-    return src_extents_[cur_extent_].start_block() + block_offset_;
+    return extents_[cur_extent_].start_block() + block_offset_;
   }
 
-  const google::protobuf::RepeatedPtrField<Extent>& src_extents_;
+  const google::protobuf::RepeatedPtrField<Extent>& extents_;
   int cur_extent_ = 0;
   size_t block_offset_ = 0;
 };
@@ -151,6 +151,13 @@
          block < extent.start_block() + extent.num_blocks();
 }
 
+// Returns true iff extent |big| fully contains extent |small|.
+constexpr bool ExtentContains(const Extent& big, const Extent& small) {
+  return big.start_block() <= small.start_block() &&
+         small.start_block() + small.num_blocks() <=
+             big.start_block() + big.num_blocks();
+}
+
 }  // namespace chromeos_update_engine
 
 #endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_EXTENT_UTILS_H_
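
A quick sketch of the new ExtentContains contract, using ExtentForRange as it
is used elsewhere in this patch (values illustrative):

    const Extent big = ExtentForRange(10, 8);    // blocks [10, 18)
    const Extent small = ExtentForRange(12, 3);  // blocks [12, 15)
    CHECK(ExtentContains(big, small));   // 10 <= 12 && 12 + 3 <= 10 + 8
    CHECK(!ExtentContains(small, big));  // containment is not symmetric
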
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index ef36a6d..6616ee1 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -14,10 +14,12 @@
 // limitations under the License.
 //
 
+#include <cstring>
 #include <map>
 #include <string>
 #include <vector>
 
+#include <android-base/strings.h>
 #include <base/bind.h>
 #include <base/files/file_path.h>
 #include <base/files/file_util.h>
@@ -25,10 +27,11 @@
 #include <base/strings/string_number_conversions.h>
 #include <base/strings/string_split.h>
 #include <base/strings/string_util.h>
-#include <brillo/flag_helper.h>
 #include <brillo/key_value_store.h>
 #include <brillo/message_loops/base_message_loop.h>
+#include <unistd.h>
 #include <xz.h>
+#include <gflags/gflags.h>
 
 #include "update_engine/common/download_action.h"
 #include "update_engine/common/fake_boot_control.h"
@@ -123,7 +126,7 @@
   vector<brillo::Blob> payload_signatures, metadata_signatures;
   SignatureFileFlagToBlobs(payload_signature_file, &payload_signatures);
   SignatureFileFlagToBlobs(metadata_signature_file, &metadata_signatures);
-  uint64_t final_metadata_size;
+  uint64_t final_metadata_size{};
   CHECK(PayloadSigner::AddSignatureToPayload(in_file,
                                              signature_sizes,
                                              payload_signatures,
@@ -165,7 +168,7 @@
   void ProcessingStopped(const ActionProcessor* processor) override {
     brillo::MessageLoop::current()->BreakLoop();
   }
-  ErrorCode code_;
+  ErrorCode code_{};
 };
 
 // TODO(deymo): Move this function to a new file and make the delta_performer
@@ -303,159 +306,201 @@
   return true;
 }
 
+DEFINE_string(old_image, "", "Path to the old rootfs");
+DEFINE_string(new_image, "", "Path to the new rootfs");
+DEFINE_string(old_kernel, "", "Path to the old kernel partition image");
+DEFINE_string(new_kernel, "", "Path to the new kernel partition image");
+DEFINE_string(old_partitions,
+              "",
+              "Path to the old partitions. To pass multiple partitions, use "
+              "a single argument with a colon between paths, e.g. "
+              "/path/to/part:/path/to/part2::/path/to/last_part . Path can "
+              "be empty, but it has to match the order of partition_names.");
+DEFINE_string(new_partitions,
+              "",
+              "Path to the new partitions. To pass multiple partitions, use "
+              "a single argument with a colon between paths, e.g. "
+              "/path/to/part:/path/to/part2:/path/to/last_part . Path has "
+              "to match the order of partition_names.");
+DEFINE_string(old_mapfiles,
+              "",
+              "Path to the .map files associated with the partition files "
+              "in the old partition. The .map file is normally generated "
+              "when creating the image in Android builds. Only recommended "
+              "for unsupported filesystem. Pass multiple files separated by "
+              "a colon as with -old_partitions.");
+DEFINE_string(new_mapfiles,
+              "",
+              "Path to the .map files associated with the partition files "
+              "in the new partition, similar to the -old_mapfiles flag.");
+DEFINE_string(partition_names,
+              string(kPartitionNameRoot) + ":" + kPartitionNameKernel,
+              "Names of the partitions. To pass multiple names, use a single "
+              "argument with a colon between names, e.g. "
+              "name:name2:name3:last_name . Name can not be empty, and it "
+              "has to match the order of partitions.");
+DEFINE_string(in_file,
+              "",
+              "Path to input delta payload file used to hash/sign payloads "
+              "and apply delta over old_image (for debugging)");
+DEFINE_string(out_file, "", "Path to output delta payload file");
+DEFINE_string(out_hash_file, "", "Path to output hash file");
+DEFINE_string(out_metadata_hash_file, "", "Path to output metadata hash file");
+DEFINE_string(out_metadata_size_file, "", "Path to output metadata size file");
+DEFINE_string(private_key, "", "Path to private key in .pem format");
+DEFINE_string(public_key, "", "Path to public key in .pem format");
+DEFINE_int32(public_key_version,
+             -1,
+             "DEPRECATED. Key-check version # of client");
+DEFINE_string(signature_size,
+              "",
+              "Raw signature size used for hash calculation. "
+              "You may pass in multiple sizes by colon separating them. E.g. "
+              "2048:2048:4096 will assume 3 signatures, the first two with "
+              "2048 size and the last 4096.");
+DEFINE_string(payload_signature_file,
+              "",
+              "Raw signature file to sign payload with. To pass multiple "
+              "signatures, use a single argument with a colon between paths, "
+              "e.g. /path/to/sig:/path/to/next:/path/to/last_sig . Each "
+              "signature will be assigned a client version, starting from "
+              "kSignatureOriginalVersion.");
+DEFINE_string(metadata_signature_file,
+              "",
+              "Raw signature file with the signature of the metadata hash. "
+              "To pass multiple signatures, use a single argument with a "
+              "colon between paths, "
+              "e.g. /path/to/sig:/path/to/next:/path/to/last_sig .");
+DEFINE_int32(chunk_size,
+             200 * 1024 * 1024,
+             "Payload chunk size (-1 for whole files)");
+DEFINE_uint64(rootfs_partition_size,
+              chromeos_update_engine::kRootFSPartitionSize,
+              "RootFS partition size for the image once installed");
+DEFINE_uint64(major_version,
+              2,
+              "The major version of the payload being generated.");
+DEFINE_int32(minor_version,
+             -1,
+             "The minor version of the payload being generated "
+             "(-1 means autodetect).");
+DEFINE_string(properties_file,
+              "",
+              "If passed, dumps the payload properties of the payload passed "
+              "in --in_file and exits. Look at --properties_format.");
+DEFINE_string(properties_format,
+              kPayloadPropertiesFormatKeyValue,
+              "Defines the format of the --properties_file. The acceptable "
+              "values are: key-value (default) and json");
+DEFINE_int64(max_timestamp,
+             0,
+             "The maximum timestamp of the OS allowed to apply this "
+             "payload.");
+DEFINE_string(security_patch_level,
+              "",
+              "The security patch level of this OTA. Devices with a newer SPL "
+              "will not be allowed to apply this payload");
+DEFINE_string(
+    partition_timestamps,
+    "",
+    "The per-partition maximum timestamps which the OS allowed to apply this "
+    "payload. Passed in comma separated pairs, e.x. system:1234,vendor:5678");
+
+DEFINE_string(new_postinstall_config_file,
+              "",
+              "A config file specifying postinstall related metadata. "
+              "Only allowed in major version 2 or newer.");
+DEFINE_string(dynamic_partition_info_file,
+              "",
+              "An info file specifying dynamic partition metadata. "
+              "Only allowed in major version 2 or newer.");
+DEFINE_bool(disable_fec_computation,
+            false,
+            "Disables the fec data computation on device.");
+DEFINE_bool(disable_verity_computation,
+            false,
+            "Disables the verity data computation on device.");
+DEFINE_string(out_maximum_signature_size_file,
+              "",
+              "Path to the output maximum signature size given a private key.");
+DEFINE_bool(is_partial_update,
+            false,
+            "The payload only targets a subset of partitions on the device,"
+            "e.g. generic kernel image update.");
+DEFINE_bool(
+    disable_vabc,
+    false,
+    "Whether to disable Virtual AB Compression when installing the OTA");
+DEFINE_bool(enable_vabc_xor,
+            false,
+            "Whether to use Virtual AB Compression XOR feature");
+DEFINE_string(apex_info_file,
+              "",
+              "Path to META/apex_info.pb found in target build");
+DEFINE_string(compressor_types,
+              "bz2:brotli",
+              "Colon ':' separated list of compressors. Allowed valures are "
+              "bz2 and brotli.");
+DEFINE_bool(enable_lz4diff,
+            false,
+            "Whether to enable LZ4diff feature when processing EROFS images.");
+
+DEFINE_bool(
+    enable_zucchini,
+    true,
+    "Whether to enable zucchini feature when processing executable files.");
+
+DEFINE_string(erofs_compression_param,
+              "",
+              "Compression parameter passed to mkfs.erofs's -z option. "
+              "Example: lz4 lz4hc,9");
+
+DEFINE_int64(max_threads,
+             0,
+             "The maximum number of threads allowed for generating "
+             "ota.");
+
+void RoundDownPartitions(const ImageConfig& config) {
+  for (const auto& part : config.partitions) {
+    if (part.path.empty()) {
+      continue;
+    }
+    const auto size = utils::FileSize(part.path);
+    if (size % kBlockSize != 0) {
+      const auto err =
+          truncate(part.path.c_str(), size / kBlockSize * kBlockSize);
+      CHECK_EQ(err, 0) << "Failed to truncate " << part.path << ", error "
+                       << strerror(errno);
+    }
+  }
+}
+
+void RoundUpPartitions(const ImageConfig& config) {
+  for (const auto& part : config.partitions) {
+    if (part.path.empty()) {
+      continue;
+    }
+    const auto size = utils::FileSize(part.path);
+    if (size % kBlockSize != 0) {
+      const auto err = truncate(
+          part.path.c_str(), (size + kBlockSize - 1) / kBlockSize * kBlockSize);
+      CHECK_EQ(err, 0) << "Failed to truncate " << part.path << ", error "
+                       << strerror(errno);
+    }
+  }
+}
+
 int Main(int argc, char** argv) {
-  DEFINE_string(old_image, "", "Path to the old rootfs");
-  DEFINE_string(new_image, "", "Path to the new rootfs");
-  DEFINE_string(old_kernel, "", "Path to the old kernel partition image");
-  DEFINE_string(new_kernel, "", "Path to the new kernel partition image");
-  DEFINE_string(old_partitions,
-                "",
-                "Path to the old partitions. To pass multiple partitions, use "
-                "a single argument with a colon between paths, e.g. "
-                "/path/to/part:/path/to/part2::/path/to/last_part . Path can "
-                "be empty, but it has to match the order of partition_names.");
-  DEFINE_string(new_partitions,
-                "",
-                "Path to the new partitions. To pass multiple partitions, use "
-                "a single argument with a colon between paths, e.g. "
-                "/path/to/part:/path/to/part2:/path/to/last_part . Path has "
-                "to match the order of partition_names.");
-  DEFINE_string(old_mapfiles,
-                "",
-                "Path to the .map files associated with the partition files "
-                "in the old partition. The .map file is normally generated "
-                "when creating the image in Android builds. Only recommended "
-                "for unsupported filesystem. Pass multiple files separated by "
-                "a colon as with -old_partitions.");
-  DEFINE_string(new_mapfiles,
-                "",
-                "Path to the .map files associated with the partition files "
-                "in the new partition, similar to the -old_mapfiles flag.");
-  DEFINE_string(partition_names,
-                string(kPartitionNameRoot) + ":" + kPartitionNameKernel,
-                "Names of the partitions. To pass multiple names, use a single "
-                "argument with a colon between names, e.g. "
-                "name:name2:name3:last_name . Name can not be empty, and it "
-                "has to match the order of partitions.");
-  DEFINE_string(in_file,
-                "",
-                "Path to input delta payload file used to hash/sign payloads "
-                "and apply delta over old_image (for debugging)");
-  DEFINE_string(out_file, "", "Path to output delta payload file");
-  DEFINE_string(out_hash_file, "", "Path to output hash file");
-  DEFINE_string(
-      out_metadata_hash_file, "", "Path to output metadata hash file");
-  DEFINE_string(
-      out_metadata_size_file, "", "Path to output metadata size file");
-  DEFINE_string(private_key, "", "Path to private key in .pem format");
-  DEFINE_string(public_key, "", "Path to public key in .pem format");
-  DEFINE_int32(
-      public_key_version, -1, "DEPRECATED. Key-check version # of client");
-  DEFINE_string(signature_size,
-                "",
-                "Raw signature size used for hash calculation. "
-                "You may pass in multiple sizes by colon separating them. E.g. "
-                "2048:2048:4096 will assume 3 signatures, the first two with "
-                "2048 size and the last 4096.");
-  DEFINE_string(payload_signature_file,
-                "",
-                "Raw signature file to sign payload with. To pass multiple "
-                "signatures, use a single argument with a colon between paths, "
-                "e.g. /path/to/sig:/path/to/next:/path/to/last_sig . Each "
-                "signature will be assigned a client version, starting from "
-                "kSignatureOriginalVersion.");
-  DEFINE_string(metadata_signature_file,
-                "",
-                "Raw signature file with the signature of the metadata hash. "
-                "To pass multiple signatures, use a single argument with a "
-                "colon between paths, "
-                "e.g. /path/to/sig:/path/to/next:/path/to/last_sig .");
-  DEFINE_int32(
-      chunk_size, 200 * 1024 * 1024, "Payload chunk size (-1 for whole files)");
-  DEFINE_uint64(rootfs_partition_size,
-                chromeos_update_engine::kRootFSPartitionSize,
-                "RootFS partition size for the image once installed");
-  DEFINE_uint64(
-      major_version, 2, "The major version of the payload being generated.");
-  DEFINE_int32(minor_version,
-               -1,
-               "The minor version of the payload being generated "
-               "(-1 means autodetect).");
-  DEFINE_string(properties_file,
-                "",
-                "If passed, dumps the payload properties of the payload passed "
-                "in --in_file and exits. Look at --properties_format.");
-  DEFINE_string(properties_format,
-                kPayloadPropertiesFormatKeyValue,
-                "Defines the format of the --properties_file. The acceptable "
-                "values are: key-value (default) and json");
-  DEFINE_int64(max_timestamp,
-               0,
-               "The maximum timestamp of the OS allowed to apply this "
-               "payload.");
-  DEFINE_string(
-      partition_timestamps,
-      "",
-      "The per-partition maximum timestamps which the OS allowed to apply this "
-      "payload. Passed in comma separated pairs, e.x. system:1234,vendor:5678");
-
-  DEFINE_string(new_postinstall_config_file,
-                "",
-                "A config file specifying postinstall related metadata. "
-                "Only allowed in major version 2 or newer.");
-  DEFINE_string(dynamic_partition_info_file,
-                "",
-                "An info file specifying dynamic partition metadata. "
-                "Only allowed in major version 2 or newer.");
-  DEFINE_bool(disable_fec_computation,
-              false,
-              "Disables the fec data computation on device.");
-  DEFINE_bool(disable_verity_computation,
-              false,
-              "Disables the verity data computation on device.");
-  DEFINE_string(
-      out_maximum_signature_size_file,
-      "",
-      "Path to the output maximum signature size given a private key.");
-  DEFINE_bool(is_partial_update,
-              false,
-              "The payload only targets a subset of partitions on the device,"
-              "e.g. generic kernel image update.");
-  DEFINE_bool(
-      disable_vabc,
-      false,
-      "Whether to disable Virtual AB Compression when installing the OTA");
-  DEFINE_bool(enable_vabc_xor,
-              false,
-              "Whether to use Virtual AB Compression XOR feature");
-  DEFINE_string(
-      apex_info_file, "", "Path to META/apex_info.pb found in target build");
-  DEFINE_string(compressor_types,
-                "bz2:brotli",
-                "Colon ':' separated list of compressors. Allowed valures are "
-                "bz2 and brotli.");
-  DEFINE_bool(
-      enable_lz4diff,
-      false,
-      "Whether to enable LZ4diff feature when processing EROFS images.");
-
-  DEFINE_bool(
-      enable_zucchini,
-      true,
-      "Whether to enable zucchini feature when processing executable files.");
-
-  DEFINE_string(erofs_compression_param,
-                "",
-                "Compression parameter passed to mkfs.erofs's -z option. "
-                "Example: lz4 lz4hc,9");
-
-  brillo::FlagHelper::Init(
-      argc,
-      argv,
+  gflags::SetUsageMessage(
       "Generates a payload to provide to ChromeOS' update_engine.\n\n"
       "This tool can create full payloads and also delta payloads if the src\n"
       "image is provided. It also provides debugging options to apply, sign\n"
       "and verify payloads.");
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
+  CHECK_EQ(argc, 1) << " Unused args: "
+                    << android::base::Join(
+                           std::vector<char*>(argv + 1, argv + argc), " ");
+
   Terminator::Init();
 
   logging::LoggingSettings log_settings;
@@ -478,7 +523,7 @@
         << "Private key is not provided when calculating the maximum signature "
            "size.";
 
-    size_t maximum_signature_size;
+    size_t maximum_signature_size{};
     if (!PayloadSigner::GetMaximumSignatureSize(FLAGS_private_key,
                                                 &maximum_signature_size)) {
       LOG(ERROR) << "Failed to get the maximum signature size of private key: "
@@ -648,8 +693,10 @@
   // The partition size is never passed to the delta_generator, so we
   // need to detect those from the provided files.
   if (payload_config.is_delta) {
+    RoundDownPartitions(payload_config.source);
     CHECK(payload_config.source.LoadImageSize());
   }
+  RoundUpPartitions(payload_config.target);
   CHECK(payload_config.target.LoadImageSize());
 
   if (!FLAGS_dynamic_partition_info_file.empty()) {
@@ -685,7 +732,7 @@
     // image.
     if (payload_config.is_delta) {
       brillo::KeyValueStore store;
-      uint32_t minor_version;
+      uint32_t minor_version{};
       bool minor_version_found = false;
       for (const PartitionConfig& part : payload_config.source.partitions) {
         if (part.fs_interface && part.fs_interface->LoadSettings(&store) &&
@@ -719,6 +766,11 @@
   }
 
   payload_config.max_timestamp = FLAGS_max_timestamp;
+
+  payload_config.security_patch_level = FLAGS_security_patch_level;
+
+  payload_config.max_threads = FLAGS_max_threads;
+
   if (!FLAGS_partition_timestamps.empty()) {
     CHECK(ParsePerPartitionTimestamps(FLAGS_partition_timestamps,
                                       &payload_config));
@@ -726,8 +778,20 @@
 
   if (payload_config.is_delta &&
       payload_config.version.minor >= kVerityMinorPayloadVersion &&
-      !FLAGS_disable_verity_computation)
+      !FLAGS_disable_verity_computation) {
     CHECK(payload_config.target.LoadVerityConfig());
+    for (size_t i = 0; i < payload_config.target.partitions.size(); ++i) {
+      if (payload_config.source.partitions[i].fs_interface != nullptr) {
+        continue;
+      }
+      if (!payload_config.target.partitions[i].verity.IsEmpty()) {
+        LOG(INFO) << "Partition " << payload_config.target.partitions[i].name
+                  << " is installed in full OTA, disaling verity for this "
+                     "specific partition.";
+        payload_config.target.partitions[i].verity.Clear();
+      }
+    }
+  }
 
   LOG(INFO) << "Generating " << (payload_config.is_delta ? "delta" : "full")
             << " update";
@@ -738,7 +802,7 @@
     return 1;
   }
 
-  uint64_t metadata_size;
+  uint64_t metadata_size{};
   if (!GenerateUpdatePayloadFile(
           payload_config, FLAGS_out_file, FLAGS_private_key, &metadata_size)) {
     return 1;
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index 129377a..f404c79 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -26,7 +26,6 @@
 
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_consumer/file_writer.h"
 #include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/annotated_operation.h"
@@ -67,6 +66,9 @@
   manifest_.set_minor_version(config.version.minor);
   manifest_.set_block_size(config.block_size);
   manifest_.set_max_timestamp(config.max_timestamp);
+  if (!config.security_patch_level.empty()) {
+    manifest_.set_security_patch_level(config.security_patch_level);
+  }
 
   if (config.target.dynamic_partition_metadata != nullptr)
     *(manifest_.mutable_dynamic_partition_metadata()) =
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index d520123..387cc3a 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -29,7 +29,6 @@
 #include "bsdiff/constants.h"
 #include "payload_consumer/payload_constants.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/delta_performer.h"
 #include "update_engine/payload_generator/boot_img_filesystem.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/delta_diff_utils.h"
@@ -54,6 +53,16 @@
          fec_extent.num_blocks() == 0 && fec_roots == 0;
 }
 
+void VerityConfig::Clear() {
+  hash_tree_data_extent.Clear();
+  hash_tree_extent.Clear();
+  hash_tree_algorithm.clear();
+  hash_tree_salt.clear();
+  fec_data_extent.Clear();
+  fec_extent.Clear();
+  fec_roots = 0;
+}
+
 bool PartitionConfig::ValidateExists() const {
   TEST_AND_RETURN_FALSE(!path.empty());
   TEST_AND_RETURN_FALSE(utils::FileExists(path.c_str()));
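
Clear() is the inverse of the existing IsEmpty() check above; a minimal sketch
of the intended contract (assuming a default-constructed config that is
otherwise empty):

    VerityConfig verity;
    verity.fec_roots = 2;     // any non-zero field makes the config non-empty
    CHECK(!verity.IsEmpty());
    verity.Clear();
    CHECK(verity.IsEmpty());  // extents, algorithm, salt and fec_roots reset
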
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index 7124cb0..225237a 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -27,7 +27,6 @@
 #include <brillo/secure_blob.h>
 
 #include "bsdiff/constants.h"
-#include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_generator/filesystem_interface.h"
 #include "update_engine/update_metadata.pb.h"
 
@@ -59,6 +58,9 @@
   // Whether the verity config is empty.
   bool IsEmpty() const;
 
+  // Clears this config, subsequent calls to "IsEmpty" will return true.
+  void Clear();
+
   // The extent for data covered by verity hash tree.
   Extent hash_tree_data_extent;
 
@@ -259,6 +261,10 @@
   // Whether to enable zucchini ops
   bool enable_zucchini = true;
 
+  std::string security_patch_level;
+
+  uint32_t max_threads = 0;
+
   std::vector<bsdiff::CompressorType> compressors{
       bsdiff::CompressorType::kBZ2, bsdiff::CompressorType::kBrotli};
 
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index d9f0dd7..11e136f 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -28,17 +28,14 @@
 #include <brillo/data_encoding.h>
 #include <openssl/err.h>
 #include <openssl/pem.h>
+#include <unistd.h>
 
 #include "update_engine/common/constants.h"
 #include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/subprocess.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/delta_performer.h"
-#include "update_engine/payload_consumer/payload_constants.h"
 #include "update_engine/payload_consumer/payload_metadata.h"
 #include "update_engine/payload_consumer/payload_verifier.h"
-#include "update_engine/payload_generator/delta_diff_generator.h"
-#include "update_engine/payload_generator/payload_file.h"
 #include "update_engine/update_metadata.pb.h"
 
 using std::string;
@@ -122,45 +119,35 @@
   DeltaArchiveManifest manifest;
   TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest));
 
-  // Is there already a signature op in place?
-  if (manifest.has_signatures_size()) {
-    // The signature op is tied to the size of the signature blob, but not it's
-    // contents. We don't allow the manifest to change if there is already an op
-    // present, because that might invalidate previously generated
-    // hashes/signatures.
-    if (manifest.signatures_size() != payload_signature.size()) {
-      LOG(ERROR) << "Attempt to insert different signature sized blob. "
-                 << "(current:" << manifest.signatures_size()
-                 << "new:" << payload_signature.size() << ")";
-      return false;
-    }
-
-    LOG(INFO) << "Matching signature sizes already present.";
-  } else {
-    // Updates the manifest to include the signature operation.
-    PayloadSigner::AddSignatureToManifest(
-        payload.size() - metadata_size - metadata_signature_size,
-        payload_signature.size(),
-        &manifest);
-
-    // Updates the payload to include the new manifest.
-    string serialized_manifest;
-    TEST_AND_RETURN_FALSE(manifest.AppendToString(&serialized_manifest));
-    LOG(INFO) << "Updated protobuf size: " << serialized_manifest.size();
-    payload.erase(payload.begin() + manifest_offset,
-                  payload.begin() + metadata_size);
-    payload.insert(payload.begin() + manifest_offset,
-                   serialized_manifest.begin(),
-                   serialized_manifest.end());
-
-    // Updates the protobuf size.
-    uint64_t size_be = htobe64(serialized_manifest.size());
-    memcpy(&payload[kProtobufSizeOffset], &size_be, sizeof(size_be));
-    metadata_size = serialized_manifest.size() + manifest_offset;
-
-    LOG(INFO) << "Updated payload size: " << payload.size();
-    LOG(INFO) << "Updated metadata size: " << metadata_size;
+  // Erase existing signatures.
+  if (manifest.has_signatures_offset()) {
+    payload.resize(manifest.signatures_offset() + metadata_size +
+                   metadata_signature_size);
   }
+
+  // Updates the manifest to include the signature operation.
+  PayloadSigner::AddSignatureToManifest(
+      payload.size() - metadata_size - metadata_signature_size,
+      payload_signature.size(),
+      &manifest);
+
+  // Updates the payload to include the new manifest.
+  string serialized_manifest;
+  TEST_AND_RETURN_FALSE(manifest.AppendToString(&serialized_manifest));
+  LOG(INFO) << "Updated protobuf size: " << serialized_manifest.size();
+  payload.erase(payload.begin() + manifest_offset,
+                payload.begin() + metadata_size);
+  payload.insert(payload.begin() + manifest_offset,
+                 serialized_manifest.begin(),
+                 serialized_manifest.end());
+
+  // Updates the protobuf size.
+  uint64_t size_be = htobe64(serialized_manifest.size());
+  memcpy(&payload[kProtobufSizeOffset], &size_be, sizeof(size_be));
+  metadata_size = serialized_manifest.size() + manifest_offset;
+
+  LOG(INFO) << "Updated payload size: " << payload.size();
+  LOG(INFO) << "Updated metadata size: " << metadata_size;
   uint64_t signatures_offset =
       metadata_size + metadata_signature_size + manifest.signatures_offset();
   LOG(INFO) << "Signature Blob Offset: " << signatures_offset;
diff --git a/payload_generator/payload_signer_unittest.cc b/payload_generator/payload_signer_unittest.cc
index 2bfc820..96e4431 100644
--- a/payload_generator/payload_signer_unittest.cc
+++ b/payload_generator/payload_signer_unittest.cc
@@ -158,33 +158,6 @@
   EXPECT_FALSE(payload_verifier->VerifySignature(signature, hash_data_));
 }
 
-TEST_F(PayloadSignerTest, SkipMetadataSignatureTest) {
-  ScopedTempFile payload_file("payload.XXXXXX");
-  PayloadGenerationConfig config;
-  config.version.major = kBrilloMajorPayloadVersion;
-  PayloadFile payload;
-  EXPECT_TRUE(payload.Init(config));
-  uint64_t metadata_size;
-  EXPECT_TRUE(payload.WritePayload(
-      payload_file.path(), "/dev/null", "", &metadata_size));
-  const vector<size_t> sizes = {256};
-  brillo::Blob unsigned_payload_hash, unsigned_metadata_hash;
-  EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(payload_file.path(),
-                                                   sizes,
-                                                   &unsigned_payload_hash,
-                                                   &unsigned_metadata_hash));
-  EXPECT_TRUE(
-      payload.WritePayload(payload_file.path(),
-                           "/dev/null",
-                           GetBuildArtifactsPath(kUnittestPrivateKeyPath),
-                           &metadata_size));
-  brillo::Blob signed_payload_hash, signed_metadata_hash;
-  EXPECT_TRUE(PayloadSigner::HashPayloadForSigning(
-      payload_file.path(), sizes, &signed_payload_hash, &signed_metadata_hash));
-  EXPECT_EQ(unsigned_payload_hash, signed_payload_hash);
-  EXPECT_EQ(unsigned_metadata_hash, signed_metadata_hash);
-}
-
 TEST_F(PayloadSignerTest, VerifySignedPayloadTest) {
   ScopedTempFile payload_file("payload.XXXXXX");
   PayloadGenerationConfig config;
diff --git a/sample_images/generate_test_erofs_images.sh b/sample_images/generate_test_erofs_images.sh
index b3b6aa7..a810529 100755
--- a/sample_images/generate_test_erofs_images.sh
+++ b/sample_images/generate_test_erofs_images.sh
@@ -36,7 +36,7 @@
   truncate -s 1M ${fs_root}/dir1/dir2/file4
   touch ${fs_root}/dir1/dir2/dir123/empty
   cp ${delta_generator} ${fs_root}/delta_generator
-  truncate -s 8M ${fs_root}/delta_generator
+  truncate -s 1M ${fs_root}/delta_generator
   echo "PAYLOAD_MINOR_VERSION=1234" > ${fs_root}/etc/update_engine.conf
   truncate -s 16M ${fs_root}/dir1/dir2/dir123/chunks_of_zero
 fi
diff --git a/scripts/Android.bp b/scripts/Android.bp
new file mode 100644
index 0000000..e86a9f2
--- /dev/null
+++ b/scripts/Android.bp
@@ -0,0 +1,79 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//
+// Module-specific defaults.
+//
+// For module X, if we need to build it both as a library and an executable:
+//  - A default rule `releasetools_X_defaults` is created, which lists `srcs`, `libs` and
+//    `required` properties.
+//  - `python_library_host` and `python_binary_host` are created by listing
+//    `releasetools_X_defaults` in their defaults.
+//
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "system_update_engine_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["system_update_engine_license"],
+}
+
+filegroup {
+    name: "update_device_script",
+    srcs: [
+        "**/*.py",
+    ],
+    path: ".",
+}
+
+python_library_host {
+    name: "update_payload",
+
+    srcs: [
+        "update_payload/__init__.py",
+        "update_payload/payload.py",
+        "update_payload/checker.py",
+        "update_payload/common.py",
+        "update_payload/error.py",
+        "update_payload/histogram.py",
+        "update_payload/format_utils.py",
+    ],
+    proto: {
+        canonical_path_from_root: false,
+    },
+    libs: [
+        "update_metadata-protos-python",
+    ],
+}
+
+python_binary_host {
+    name: "update_device",
+    srcs: [
+        "update_device.py",
+    ],
+    main: "update_device.py",
+    libs: [
+        "update_payload",
+    ],
+    version: {
+        py2: {
+            enabled: false,
+        },
+        py3: {
+            enabled: true,
+        },
+    },
+}
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index b2d6080..083bfc2 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -220,6 +220,10 @@
     "Required if --enabled_lz4diff true is passed. Path to liblz4.so. delta_generator will use this copy of liblz4.so for compression. It is important that this copy of liblz4.so is the same as the one on source build."
   DEFINE_string erofs_compression_param "" \
     "Compression parameter passed to mkfs.erofs's -z option."
+  DEFINE_string security_patch_level "" \
+    "Optional: security patch level of this OTA"
+  DEFINE_string max_threads "" \
+    "Optional: specifies max_threads used to generate OTA"
 fi
 if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
   DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
@@ -759,6 +763,11 @@
       --disable_vabc="${FLAGS_disable_vabc}" )
   fi
 
+  if [[ -n "${FLAGS_max_threads}" ]]; then
+    GENERATOR_ARGS+=(
+      --max_threads="${FLAGS_max_threads}" )
+  fi
+
   # minor version is set only for delta or partial payload.
   if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
     GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
@@ -776,6 +785,10 @@
     GENERATOR_ARGS+=( --max_timestamp="${FLAGS_max_timestamp}" )
   fi
 
+  if [[ -n "${FLAGS_security_patch_level}" ]]; then
+    GENERATOR_ARGS+=( --security_patch_level="${FLAGS_security_patch_level}" )
+  fi
+
   if [[ -n "${FLAGS_partition_timestamps}" ]]; then
     GENERATOR_ARGS+=( --partition_timestamps="${FLAGS_partition_timestamps}" )
   fi
diff --git a/scripts/simulate_ota.py b/scripts/simulate_ota.py
old mode 100644
new mode 100755
index bf1fc98..0e5a21b
--- a/scripts/simulate_ota.py
+++ b/scripts/simulate_ota.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 #
 # Copyright (C) 2020 The Android Open Source Project
 #
@@ -17,8 +18,6 @@
 """Tools for running host side simulation of an OTA update."""
 
 
-from __future__ import print_function
-
 import argparse
 import filecmp
 import os
@@ -49,7 +48,8 @@
     return fp.read(4) == b'\x3A\xFF\x26\xED'
 
 
-def extract_img(zip_archive: zipfile.ZipFile, img_name, output_path):
+def extract_img(zip_archive: zipfile.ZipFile, img_name, output_path, is_source):
+  """ Extract and unsparse partition image from zip archive """
   entry_name = "IMAGES/" + img_name + ".img"
   try:
     extract_file(zip_archive, entry_name, output_path)
@@ -61,6 +61,22 @@
     subprocess.check_output(["simg2img", output_path, raw_img_path])
     os.rename(raw_img_path, output_path)
 
+  # delta_generator only supports images whose size is a multiple of 4 KiB.
+  # For target images we pad the data with zeros if needed, but for source
+  # images we truncate the data, since the last block of the old image could
+  # be padded on disk with unknown data.
+  file_size = os.path.getsize(output_path)
+  if file_size % 4096 != 0:
+    if is_source:
+      print("Rounding DOWN partition {} to a multiple of 4 KiB."
+            .format(output_path))
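+      # -4096 == ~4095; ANDing clears the low 12 bits (round down to 4 KiB).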
+      file_size = file_size & -4096
+    else:
+      print("Rounding UP partition {} to a multiple of 4 KiB."
+            .format(output_path))
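+      # Adding 4095 first makes the same mask round up to the next 4 KiB boundary.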
+      file_size = (file_size + 4095) & -4096
+    with open(output_path, 'a') as f:
+      f.truncate(file_size)
 
 def run_ota(source, target, payload_path, tempdir, output_dir):
   """Run an OTA on host side"""
@@ -87,10 +103,10 @@
           "source target file must point to a valid zipfile or directory " + \
           source
       print("Extracting source image for", name)
-      extract_img(source, name, old_image)
+      extract_img(source, name, old_image, True)
     if target_exist:
       print("Extracting target image for", name)
-      extract_img(target, name, new_image)
+      extract_img(target, name, new_image, False)
 
     old_partitions.append(old_image)
     scratch_image_name = new_image + ".actual"
diff --git a/scripts/trim_ota_package.py b/scripts/trim_ota_package.py
index df7f170..8bf2182 100644
--- a/scripts/trim_ota_package.py
+++ b/scripts/trim_ota_package.py
@@ -27,7 +27,7 @@
   outfile = argv[2]
   with zipfile.ZipFile(infile, "r") as inzfp, zipfile.ZipFile(outfile, "w") as outzfp:
     for entry in inzfp.infolist():
-      if entry.filename.startswith("META") or entry.filename.endswith(".map"):
+      if entry.filename.startswith("META") or entry.filename.endswith(".map") or entry.filename.endswith(".prop"):
         outzfp.writestr(entry, inzfp.read(entry))
       elif entry.filename == "payload.bin":
         outzfp.writestr(entry, readPayloadMetadata(inzfp, entry))
diff --git a/scripts/update_device.py b/scripts/update_device.py
index 72cee49..f94774b 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -25,6 +25,7 @@
 import hashlib
 import logging
 import os
+import re
 import socket
 import subprocess
 import sys
@@ -50,7 +51,7 @@
 DEVICE_PORT = 1234
 
 
-def CopyFileObjLength(fsrc, fdst, buffer_size=128 * 1024, copy_length=None):
+def CopyFileObjLength(fsrc, fdst, buffer_size=128 * 1024, copy_length=None, speed_limit=None):
   """Copy from a file object to another.
 
   This function is similar to shutil.copyfileobj except that it allows to copy
@@ -61,10 +62,18 @@
     fdst: destination file object where to write to.
     buffer_size: size of the copy buffer in memory.
     copy_length: maximum number of bytes to copy, or None to copy everything.
+    speed_limit: upper limit for copying speed, in bytes per second.
 
   Returns:
     the number of bytes copied.
   """
+  # If the buffer size is significantly bigger than the speed limit,
+  # traffic would appear extremely spiky to the client.
+  if speed_limit:
+    print(f"Applying speed limit: {speed_limit}")
+    buffer_size = min(speed_limit//32, buffer_size)
+
+  start_time = time.time()
   copied = 0
   while True:
     chunk_size = buffer_size
@@ -75,6 +84,11 @@
     buf = fsrc.read(chunk_size)
     if not buf:
       break
+    if speed_limit:
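+      # Throttle: if ahead of the copied / speed_limit schedule, sleep off the gap.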
+      expected_duration = copied/speed_limit
+      actual_duration = time.time() - start_time
+      if actual_duration < expected_duration:
+        time.sleep(expected_duration-actual_duration)
     fdst.write(buf)
     copied += len(buf)
   return copied
@@ -211,7 +225,8 @@
     self.end_headers()
 
     f.seek(serving_start + start_range)
-    CopyFileObjLength(f, self.wfile, copy_length=end_range - start_range)
+    CopyFileObjLength(f, self.wfile, copy_length=end_range -
+                      start_range, speed_limit=self.speed_limit)
 
   def do_POST(self):  # pylint: disable=invalid-name
     """Reply with the omaha response xml."""
@@ -291,12 +306,13 @@
 class ServerThread(threading.Thread):
   """A thread for serving HTTP requests."""
 
-  def __init__(self, ota_filename, serving_range):
+  def __init__(self, ota_filename, serving_range, speed_limit):
     threading.Thread.__init__(self)
     # serving_payload and serving_range are class attributes and the
     # UpdateHandler class is instantiated with every request.
     UpdateHandler.serving_payload = ota_filename
     UpdateHandler.serving_range = serving_range
+    UpdateHandler.speed_limit = speed_limit
     self._httpd = BaseHTTPServer.HTTPServer(('127.0.0.1', 0), UpdateHandler)
     self.port = self._httpd.server_port
 
@@ -312,8 +328,8 @@
     self._httpd.socket.close()
 
 
-def StartServer(ota_filename, serving_range):
-  t = ServerThread(ota_filename, serving_range)
+def StartServer(ota_filename, serving_range, speed_limit):
+  t = ServerThread(ota_filename, serving_range, speed_limit)
   t.start()
   return t
 
@@ -408,6 +424,27 @@
       ]) == 0
 
 
+def ParseSpeedLimit(arg: str) -> int:
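+  """Parse a human-readable speed limit such as '10K' or '5m' into bytes/sec."""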
+  arg = arg.strip().upper()
+  if not re.match(r"\d+[KkMmGgTt]?", arg):
+    raise argparse.ArgumentTypeError(
+        "Wrong speed limit format; expected a number followed by an optional "
+        "unit, e.g. 10K, 5m, 3G (case insensitive)")
+  unit = 1
+  if arg[-1].isalpha():
+    if arg[-1] == "K":
+      unit = 1024
+    elif arg[-1] == "M":
+      unit = 1024 * 1024
+    elif arg[-1] == "G":
+      unit = 1024 * 1024 * 1024
+    elif arg[-1] == "T":
+      unit = 1024 * 1024 * 1024 * 1024
+    else:
+      raise argparse.ArgumentTypeError(
+          f"Unsupported unit for download speed: {arg[-1]}, supported units "
+          "are K, M, G, T (case insensitive)")
+    arg = arg[:-1]
+  return int(float(arg) * unit)
+
+
 def main():
   parser = argparse.ArgumentParser(description='Android A/B OTA helper.')
   parser.add_argument('otafile', metavar='PAYLOAD', type=str,
@@ -444,7 +481,22 @@
                       help='Perform reset slot switch for this OTA package')
   parser.add_argument('--wipe-user-data', action='store_true',
                       help='Wipe userdata after installing OTA')
+  parser.add_argument('--vabc-none', action='store_true',
+                      help='Set Virtual AB Compression algorithm to none, but still use Android COW format')
+  parser.add_argument('--disable-vabc', action='store_true',
+                      help='Disable Virtual AB Compression. When set, installation falls back to regular A/B')
+  parser.add_argument('--enable-threading', action='store_true',
+                      help='Enable multi-threaded compression for VABC')
+  parser.add_argument('--batched-writes', action='store_true',
+                      help='Enable batched writes for VABC')
+  parser.add_argument('--speed-limit', type=str,
+                      help='Speed limit for serving payloads over HTTP. For '
+                      'example: 10K, 5m, 1G, input is case insensitive')
+
   args = parser.parse_args()
+  if args.speed_limit:
+    args.speed_limit = ParseSpeedLimit(args.speed_limit)
+
   logging.basicConfig(
       level=logging.WARNING if args.no_verbose else logging.INFO)
 
@@ -497,6 +549,14 @@
     args.extra_headers += "\nRUN_POST_INSTALL=0"
   if args.wipe_user_data:
     args.extra_headers += "\nPOWERWASH=1"
+  if args.vabc_none:
+    args.extra_headers += "\nVABC_NONE=1"
+  if args.disable_vabc:
+    args.extra_headers += "\nDISABLE_VABC=1"
+  if args.enable_threading:
+    args.extra_headers += "\nENABLE_THREADING=1"
+  if args.batched_writes:
+    args.extra_headers += "\nBATCHED_WRITES=1"
 
   with zipfile.ZipFile(args.otafile) as zfp:
     CARE_MAP_ENTRY_NAME = "care_map.pb"
@@ -531,7 +591,7 @@
       serving_range = (ota.offset, ota.size)
     else:
       serving_range = (0, os.stat(args.otafile).st_size)
-    server_thread = StartServer(args.otafile, serving_range)
+    server_thread = StartServer(args.otafile, serving_range, args.speed_limit)
     cmds.append(
         ['reverse', 'tcp:%d' % DEVICE_PORT, 'tcp:%d' % server_thread.port])
     finalize_cmds.append(['reverse', '--remove', 'tcp:%d' % DEVICE_PORT])
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_metadata_pb2.py
similarity index 75%
rename from scripts/update_payload/update_metadata_pb2.py
rename to scripts/update_metadata_pb2.py
index b62a67a..cb6c39c 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_metadata_pb2.py
@@ -20,7 +20,7 @@
   package='chromeos_update_engine',
   syntax='proto2',
   serialized_options=_b('H\003'),
-  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"\x8f\x01\n\tImageInfo\x12\x11\n\x05\x62oard\x18\x01 \x01(\tB\x02\x18\x01\x12\x0f\n\x03key\x18\x02 \x01(\tB\x02\x18\x01\x12\x13\n\x07\x63hannel\x18\x03 \x01(\tB\x02\x18\x01\x12\x13\n\x07version\x18\x04 \x01(\tB\x02\x18\x01\x12\x19\n\rbuild_channel\x18\x05 \x01(\tB\x02\x18\x01\x12\x19\n\rbuild_version\x18\x06 \x01(\tB\x02\x18\x01\"\xfc\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xbb\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\x12\x0c\n\x08ZUCCHINI\x10\x0b\"\x81\x02\n\x11\x43owMergeOperation\x12<\n\x04type\x18\x01 \x01(\x0e\x32..chromeos_update_engine.CowMergeOperation.Type\x12\x32\n\nsrc_extent\x18\x02 \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\ndst_extent\x18\x03 \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_offset\x18\x04 \x01(\r\"2\n\x04Type\x12\x0c\n\x08\x43OW_COPY\x10\x00\x12\x0b\n\x07\x43OW_XOR\x10\x01\x12\x0f\n\x0b\x43OW_REPLACE\x10\x02\"\xc8\x06\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\x12\x0f\n\x07version\x18\x11 \x01(\t\x12\x43\n\x10merge_operations\x18\x12 \x03(\x0b\x32).chromeos_update_engine.CowMergeOperation\x12\x19\n\x11\x65stimate_cow_size\x18\x13 \x01(\x04\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"\xbe\x01\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\x12\x14\n\x0cvabc_enabled\x18\x03 \x01(\x08\x12\x1e\n\x16vabc_compression_param\x18\x04 \x01(\t\x12\x13\n\x0b\x63ow_version\x18\x05 \x01(\r\"c\n\x08\x41pexInfo\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x15\n\ris_compressed\x18\x03 \x01(\x08\x12\x19\n\x11\x64\x65\x63ompressed_size\x18\x04 \x01(\x03\"C\n\x0c\x41pexMetadata\x12\x33\n\tapex_info\x18\x01 \x03(\x0b\x32 .chromeos_update_engine.ApexInfo\"\x9e\x07\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12=\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfoB\x02\x18\x01\x12=\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfoB\x02\x18\x01\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x12\x33\n\tapex_info\x18\x11 \x03(\x0b\x32 .chromeos_update_engine.ApexInfoB\x02H\x03')
+  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"\xa6\x04\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xe5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\x12\x0c\n\x08ZUCCHINI\x10\x0b\x12\x12\n\x0eLZ4DIFF_BSDIFF\x10\x0c\x12\x14\n\x10LZ4DIFF_PUFFDIFF\x10\r\"\x81\x02\n\x11\x43owMergeOperation\x12<\n\x04type\x18\x01 \x01(\x0e\x32..chromeos_update_engine.CowMergeOperation.Type\x12\x32\n\nsrc_extent\x18\x02 \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\ndst_extent\x18\x03 \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_offset\x18\x04 \x01(\r\"2\n\x04Type\x12\x0c\n\x08\x43OW_COPY\x10\x00\x12\x0b\n\x07\x43OW_XOR\x10\x01\x12\x0f\n\x0b\x43OW_REPLACE\x10\x02\"\xc8\x06\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\x12\x0f\n\x07version\x18\x11 \x01(\t\x12\x43\n\x10merge_operations\x18\x12 \x03(\x0b\x32).chromeos_update_engine.CowMergeOperation\x12\x19\n\x11\x65stimate_cow_size\x18\x13 \x01(\x04\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"8\n\x0eVABCFeatureSet\x12\x10\n\x08threaded\x18\x01 
\x01(\x08\x12\x14\n\x0c\x62\x61tch_writes\x18\x02 \x01(\x08\"\x80\x02\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\x12\x14\n\x0cvabc_enabled\x18\x03 \x01(\x08\x12\x1e\n\x16vabc_compression_param\x18\x04 \x01(\t\x12\x13\n\x0b\x63ow_version\x18\x05 \x01(\r\x12@\n\x10vabc_feature_set\x18\x06 \x01(\x0b\x32&.chromeos_update_engine.VABCFeatureSet\"c\n\x08\x41pexInfo\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x15\n\ris_compressed\x18\x03 \x01(\x08\x12\x19\n\x11\x64\x65\x63ompressed_size\x18\x04 \x01(\x03\"C\n\x0c\x41pexMetadata\x12\x33\n\tapex_info\x18\x01 \x03(\x0b\x32 .chromeos_update_engine.ApexInfo\"\xc3\x03\n\x14\x44\x65ltaArchiveManifest\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x12\x33\n\tapex_info\x18\x11 \x03(\x0b\x32 .chromeos_update_engine.ApexInfo\x12\x1c\n\x14security_patch_level\x18\x12 \x01(\tJ\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03J\x04\x08\x06\x10\x07J\x04\x08\x07\x10\x08J\x04\x08\x08\x10\tJ\x04\x08\t\x10\nJ\x04\x08\n\x10\x0bJ\x04\x08\x0b\x10\x0c\x42\x02H\x03')
 )
 
 
@@ -79,11 +79,19 @@
       name='ZUCCHINI', index=11, number=11,
       serialized_options=None,
       type=None),
+    _descriptor.EnumValueDescriptor(
+      name='LZ4DIFF_BSDIFF', index=12, number=12,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='LZ4DIFF_PUFFDIFF', index=13, number=13,
+      serialized_options=None,
+      type=None),
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=775,
-  serialized_end=962,
+  serialized_start=629,
+  serialized_end=858,
 )
 _sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE)
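
As a quick sanity check on the regenerated module, the two new operation
types are exposed on the message class; a minimal sketch (only the module
import path is assumed):

    import update_metadata_pb2

    # Appended in this change with numbers 12 and 13 (see the
    # EnumValueDescriptor entries above).
    assert update_metadata_pb2.InstallOperation.LZ4DIFF_BSDIFF == 12
    assert update_metadata_pb2.InstallOperation.LZ4DIFF_PUFFDIFF == 13
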
 
@@ -108,8 +116,8 @@
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=1172,
-  serialized_end=1222,
+  serialized_start=1068,
+  serialized_end=1118,
 )
 _sym_db.RegisterEnumDescriptor(_COWMERGEOPERATION_TYPE)
 
@@ -265,72 +273,6 @@
 )
 
 
-_IMAGEINFO = _descriptor.Descriptor(
-  name='ImageInfo',
-  full_name='chromeos_update_engine.ImageInfo',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  serialized_options=None,
-  is_extendable=False,
-  syntax='proto2',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=308,
-  serialized_end=451,
-)
-
-
 _INSTALLOPERATION = _descriptor.Descriptor(
   name='InstallOperation',
   full_name='chromeos_update_engine.InstallOperation',
@@ -414,8 +356,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=454,
-  serialized_end=962,
+  serialized_start=308,
+  serialized_end=858,
 )
 
 
@@ -467,8 +409,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=965,
-  serialized_end=1222,
+  serialized_start=861,
+  serialized_end=1118,
 )
 
 
@@ -624,8 +566,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1225,
-  serialized_end=2065,
+  serialized_start=1121,
+  serialized_end=1961,
 )
 
 
@@ -669,8 +611,46 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2067,
-  serialized_end=2143,
+  serialized_start=1963,
+  serialized_end=2039,
+)
+
+
+_VABCFEATURESET = _descriptor.Descriptor(
+  name='VABCFeatureSet',
+  full_name='chromeos_update_engine.VABCFeatureSet',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='threaded', full_name='chromeos_update_engine.VABCFeatureSet.threaded', index=0,
+      number=1, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='batch_writes', full_name='chromeos_update_engine.VABCFeatureSet.batch_writes', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto2',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2041,
+  serialized_end=2097,
 )
 
 
@@ -716,6 +696,13 @@
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='vabc_feature_set', full_name='chromeos_update_engine.DynamicPartitionMetadata.vabc_feature_set', index=5,
+      number=6, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -728,8 +715,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2146,
-  serialized_end=2336,
+  serialized_start=2100,
+  serialized_end=2356,
 )
 
 
@@ -780,8 +767,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2338,
-  serialized_end=2437,
+  serialized_start=2358,
+  serialized_end=2457,
 )
 
 
@@ -811,8 +798,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2439,
-  serialized_end=2506,
+  serialized_start=2459,
+  serialized_end=2526,
 )
 
 
@@ -824,124 +811,75 @@
   containing_type=None,
   fields=[
     _descriptor.FieldDescriptor(
-      name='install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.install_operations', index=0,
-      number=1, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
-      number=2, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
+      name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=0,
       number=3, type=13, cpp_type=3, label=1,
       has_default_value=True, default_value=4096,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=3,
+      name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=1,
       number=4, type=4, cpp_type=4, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=4,
+      name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=2,
       number=5, type=4, cpp_type=4, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='old_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_kernel_info', index=5,
-      number=6, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
-      number=7, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
-      number=8, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
-      number=9, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
-      number=10, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10,
-      number=11, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      serialized_options=_b('\030\001'), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11,
+      name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=3,
       number=12, type=13, cpp_type=3, label=1,
       has_default_value=True, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=12,
+      name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=4,
       number=13, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=13,
+      name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=5,
       number=14, type=3, cpp_type=2, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='dynamic_partition_metadata', full_name='chromeos_update_engine.DeltaArchiveManifest.dynamic_partition_metadata', index=14,
+      name='dynamic_partition_metadata', full_name='chromeos_update_engine.DeltaArchiveManifest.dynamic_partition_metadata', index=6,
       number=15, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='partial_update', full_name='chromeos_update_engine.DeltaArchiveManifest.partial_update', index=15,
+      name='partial_update', full_name='chromeos_update_engine.DeltaArchiveManifest.partial_update', index=7,
       number=16, type=8, cpp_type=7, label=1,
       has_default_value=False, default_value=False,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
-      name='apex_info', full_name='chromeos_update_engine.DeltaArchiveManifest.apex_info', index=16,
+      name='apex_info', full_name='chromeos_update_engine.DeltaArchiveManifest.apex_info', index=8,
       number=17, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='security_patch_level', full_name='chromeos_update_engine.DeltaArchiveManifest.security_patch_level', index=9,
+      number=18, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -954,8 +892,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=2509,
-  serialized_end=3435,
+  serialized_start=2529,
+  serialized_end=2980,
 )
 
 _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES
@@ -978,26 +916,19 @@
 _PARTITIONUPDATE.fields_by_name['fec_extent'].message_type = _EXTENT
 _PARTITIONUPDATE.fields_by_name['merge_operations'].message_type = _COWMERGEOPERATION
 _DYNAMICPARTITIONMETADATA.fields_by_name['groups'].message_type = _DYNAMICPARTITIONGROUP
+_DYNAMICPARTITIONMETADATA.fields_by_name['vabc_feature_set'].message_type = _VABCFEATURESET
 _APEXMETADATA.fields_by_name['apex_info'].message_type = _APEXINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION
-_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION
-_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info'].message_type = _PARTITIONINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info'].message_type = _PARTITIONINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info'].message_type = _PARTITIONINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['old_image_info'].message_type = _IMAGEINFO
-_DELTAARCHIVEMANIFEST.fields_by_name['new_image_info'].message_type = _IMAGEINFO
 _DELTAARCHIVEMANIFEST.fields_by_name['partitions'].message_type = _PARTITIONUPDATE
 _DELTAARCHIVEMANIFEST.fields_by_name['dynamic_partition_metadata'].message_type = _DYNAMICPARTITIONMETADATA
 _DELTAARCHIVEMANIFEST.fields_by_name['apex_info'].message_type = _APEXINFO
 DESCRIPTOR.message_types_by_name['Extent'] = _EXTENT
 DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES
 DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO
-DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO
 DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION
 DESCRIPTOR.message_types_by_name['CowMergeOperation'] = _COWMERGEOPERATION
 DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE
 DESCRIPTOR.message_types_by_name['DynamicPartitionGroup'] = _DYNAMICPARTITIONGROUP
+DESCRIPTOR.message_types_by_name['VABCFeatureSet'] = _VABCFEATURESET
 DESCRIPTOR.message_types_by_name['DynamicPartitionMetadata'] = _DYNAMICPARTITIONMETADATA
 DESCRIPTOR.message_types_by_name['ApexInfo'] = _APEXINFO
 DESCRIPTOR.message_types_by_name['ApexMetadata'] = _APEXMETADATA
@@ -1033,13 +964,6 @@
   })
 _sym_db.RegisterMessage(PartitionInfo)
 
-ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), {
-  'DESCRIPTOR' : _IMAGEINFO,
-  '__module__' : 'update_metadata_pb2'
-  # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo)
-  })
-_sym_db.RegisterMessage(ImageInfo)
-
 InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), {
   'DESCRIPTOR' : _INSTALLOPERATION,
   '__module__' : 'update_metadata_pb2'
@@ -1068,6 +992,13 @@
   })
 _sym_db.RegisterMessage(DynamicPartitionGroup)
 
+VABCFeatureSet = _reflection.GeneratedProtocolMessageType('VABCFeatureSet', (_message.Message,), {
+  'DESCRIPTOR' : _VABCFEATURESET,
+  '__module__' : 'update_metadata_pb2'
+  # @@protoc_insertion_point(class_scope:chromeos_update_engine.VABCFeatureSet)
+  })
+_sym_db.RegisterMessage(VABCFeatureSet)
+
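
A minimal sketch of populating the new message from Python (field names come
from the descriptors above; the values themselves are made up):

    import update_metadata_pb2

    dpm = update_metadata_pb2.DynamicPartitionMetadata()
    dpm.vabc_enabled = True
    # VABCFeatureSet carries two bools: threaded (field 1) and
    # batch_writes (field 2).
    dpm.vabc_feature_set.threaded = True
    dpm.vabc_feature_set.batch_writes = True
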
 DynamicPartitionMetadata = _reflection.GeneratedProtocolMessageType('DynamicPartitionMetadata', (_message.Message,), {
   'DESCRIPTOR' : _DYNAMICPARTITIONMETADATA,
   '__module__' : 'update_metadata_pb2'
@@ -1099,20 +1030,6 @@
 
 DESCRIPTOR._options = None
 _SIGNATURES_SIGNATURE.fields_by_name['version']._options = None
-_IMAGEINFO.fields_by_name['board']._options = None
-_IMAGEINFO.fields_by_name['key']._options = None
-_IMAGEINFO.fields_by_name['channel']._options = None
-_IMAGEINFO.fields_by_name['version']._options = None
-_IMAGEINFO.fields_by_name['build_channel']._options = None
-_IMAGEINFO.fields_by_name['build_version']._options = None
 _INSTALLOPERATION_TYPE.values_by_name["MOVE"]._options = None
 _INSTALLOPERATION_TYPE.values_by_name["BSDIFF"]._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['install_operations']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['old_image_info']._options = None
-_DELTAARCHIVEMANIFEST.fields_by_name['new_image_info']._options = None
 # @@protoc_insertion_point(module_scope)
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
deleted file mode 100644
index 29ccb8e..0000000
--- a/scripts/update_payload/applier.py
+++ /dev/null
@@ -1,621 +0,0 @@
-#
-# Copyright (C) 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""Applying a Chrome OS update payload.
-
-This module is used internally by the main Payload class for applying an update
-payload. The interface for invoking the applier is as follows:
-
-  applier = PayloadApplier(payload)
-  applier.Run(...)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-import array
-import bz2
-import hashlib
-# The lzma library is not available everywhere, so ignore an import failure
-# when it is not going to be used. For example, 'cros flash' uses devserver
-# code which eventually loads this file, but the lzma library is not included
-# in the client test devices; since 'cros flash' does not use lzma, this is
-# fine. Python 3.x includes lzma, but for backward compatibility with
-# Python 2.7, backports-lzma is needed.
-try:
-  import lzma
-except ImportError:
-  try:
-    from backports import lzma
-  except ImportError:
-    pass
-import os
-import subprocess
-import sys
-import tempfile
-
-from update_payload import common
-from update_payload.error import PayloadError
-
-#
-# Helper functions.
-#
-def _VerifySha256(file_obj, expected_hash, name, length=-1):
-  """Verifies the SHA256 hash of a file.
-
-  Args:
-    file_obj: file object to read
-    expected_hash: the hash digest we expect to be getting
-    name: name string of this hash, for error reporting
-    length: precise length of data to verify (optional)
-
-  Raises:
-    PayloadError if computed hash doesn't match expected one, or if fails to
-    read the specified length of data.
-  """
-  hasher = hashlib.sha256()
-  block_length = 1024 * 1024
-  max_length = length if length >= 0 else sys.maxsize
-
-  while max_length > 0:
-    read_length = min(max_length, block_length)
-    data = file_obj.read(read_length)
-    if not data:
-      break
-    max_length -= len(data)
-    hasher.update(data)
-
-  if length >= 0 and max_length > 0:
-    raise PayloadError(
-        'insufficient data (%d instead of %d) when verifying %s' %
-        (length - max_length, length, name))
-
-  actual_hash = hasher.digest()
-  if actual_hash != expected_hash:
-    raise PayloadError('%s hash (%s) not as expected (%s)' %
-                       (name, common.FormatSha256(actual_hash),
-                        common.FormatSha256(expected_hash)))
-
-
-def _ReadExtents(file_obj, extents, block_size, max_length=-1):
-  """Reads data from file as defined by extent sequence.
-
-  This tries to be efficient by not copying data as it is read in chunks.
-
-  Args:
-    file_obj: file object
-    extents: sequence of block extents (offset and length)
-    block_size: size of each block
-    max_length: maximum length to read (optional)
-
-  Returns:
-    A character array containing the concatenated read data.
-  """
-  data = array.array('B')
-  if max_length < 0:
-    max_length = sys.maxsize
-  for ex in extents:
-    if max_length == 0:
-      break
-    read_length = min(max_length, ex.num_blocks * block_size)
-
-    file_obj.seek(ex.start_block * block_size)
-    data.fromfile(file_obj, read_length)
-
-    max_length -= read_length
-
-  return data
-
-
-def _WriteExtents(file_obj, data, extents, block_size, base_name):
-  """Writes data to file as defined by extent sequence.
-
-  This tries to be efficient by not copying data as it is written in chunks.
-
-  Args:
-    file_obj: file object
-    data: data to write
-    extents: sequence of block extents (offset and length)
-    block_size: size of each block
-    base_name: name string of extent sequence for error reporting
-
-  Raises:
-    PayloadError when things don't add up.
-  """
-  data_offset = 0
-  data_length = len(data)
-  for ex, ex_name in common.ExtentIter(extents, base_name):
-    if not data_length:
-      raise PayloadError('%s: more write extents than data' % ex_name)
-    write_length = min(data_length, ex.num_blocks * block_size)
-    file_obj.seek(ex.start_block * block_size)
-    file_obj.write(data[data_offset:(data_offset + write_length)])
-
-    data_offset += write_length
-    data_length -= write_length
-
-  if data_length:
-    raise PayloadError('%s: more data than write extents' % base_name)
-
-
-def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
-  """Translates an extent sequence into a bspatch-compatible string argument.
-
-  Args:
-    extents: sequence of block extents (offset and length)
-    block_size: size of each block
-    base_name: name string of extent sequence for error reporting
-    data_length: the actual total length of the data in bytes (optional)
-
-  Returns:
-    A tuple consisting of (i) a string of the form
-    "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
-    for filling the last extent, (iii) the length of the padding (zero means no
-    padding is needed and the extents cover the full length of data).
-
-  Raises:
-    PayloadError if data_length is too short or too long.
-  """
-  arg = ''
-  pad_off = pad_len = 0
-  if data_length < 0:
-    data_length = sys.maxsize
-  for ex, ex_name in common.ExtentIter(extents, base_name):
-    if not data_length:
-      raise PayloadError('%s: more extents than total data length' % ex_name)
-
-    start_byte = ex.start_block * block_size
-    num_bytes = ex.num_blocks * block_size
-    if data_length < num_bytes:
-      # We're only padding a real extent.
-      pad_off = start_byte + data_length
-      pad_len = num_bytes - data_length
-      num_bytes = data_length
-
-    arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
-    data_length -= num_bytes
-
-  if data_length:
-    raise PayloadError('%s: extents not covering full data length' % base_name)
-
-  return arg, pad_off, pad_len
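
To make the removed helper's contract concrete, a worked example with
hypothetical inputs (block_size 4096, as elsewhere in this module):

    # One extent of 2 blocks starting at block 1, with data_length 6000:
    #   start_byte = 1 * 4096 = 4096, num_bytes = 2 * 4096 = 8192
    # Since 6000 < 8192, the extent is truncated to the data and the tail
    # must be zero-padded, giving:
    #   arg     == '4096:6000'
    #   pad_off == 4096 + 6000 == 10096
    #   pad_len == 8192 - 6000 == 2192
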
-
-
-#
-# Payload application.
-#
-class PayloadApplier(object):
-  """Applying an update payload.
-
-  This is a short-lived object whose purpose is to isolate the logic used for
-  applying an update payload.
-  """
-
-  def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
-               puffpatch_path=None, truncate_to_expected_size=True):
-    """Initialize the applier.
-
-    Args:
-      payload: the payload object to apply
-      bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
-      bspatch_path: path to the bspatch binary (optional)
-      puffpatch_path: path to the puffpatch binary (optional)
-      truncate_to_expected_size: whether to truncate the resulting partitions
-                                 to their expected sizes, as specified in the
-                                 payload (optional)
-    """
-    assert payload.is_init, 'uninitialized update payload'
-    self.payload = payload
-    self.block_size = payload.manifest.block_size
-    self.minor_version = payload.manifest.minor_version
-    self.bsdiff_in_place = bsdiff_in_place
-    self.bspatch_path = bspatch_path or 'bspatch'
-    self.puffpatch_path = puffpatch_path or 'puffin'
-    self.truncate_to_expected_size = truncate_to_expected_size
-
-  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
-    """Applies a REPLACE{,_BZ,_XZ} operation.
-
-    Args:
-      op: the operation object
-      op_name: name string for error reporting
-      out_data: the data to be written
-      part_file: the partition file object
-      part_size: the size of the partition
-
-    Raises:
-      PayloadError if something goes wrong.
-    """
-    block_size = self.block_size
-    data_length = len(out_data)
-
-    # Decompress data if needed.
-    if op.type == common.OpType.REPLACE_BZ:
-      out_data = bz2.decompress(out_data)
-      data_length = len(out_data)
-    elif op.type == common.OpType.REPLACE_XZ:
-      # pylint: disable=no-member
-      out_data = lzma.decompress(out_data)
-      data_length = len(out_data)
-
-    # Write data to blocks specified in dst extents.
-    data_start = 0
-    for ex, ex_name in common.ExtentIter(op.dst_extents,
-                                         '%s.dst_extents' % op_name):
-      start_block = ex.start_block
-      num_blocks = ex.num_blocks
-      count = num_blocks * block_size
-
-      data_end = data_start + count
-
-      # Make sure we're not running past partition boundary.
-      if (start_block + num_blocks) * block_size > part_size:
-        raise PayloadError(
-            '%s: extent (%s) exceeds partition size (%d)' %
-            (ex_name, common.FormatExtent(ex, block_size),
-             part_size))
-
-      # Make sure that we have enough data to write.
-      if data_end >= data_length + block_size:
-        raise PayloadError(
-            '%s: more dst blocks than data (even with padding)' % op_name)
-
-      # Pad with zeros if necessary.
-      if data_end > data_length:
-        padding = data_end - data_length
-        out_data += b'\0' * padding
-
-      self.payload.payload_file.seek(start_block * block_size)
-      part_file.seek(start_block * block_size)
-      part_file.write(out_data[data_start:data_end])
-
-      data_start += count
-
-    # Make sure we wrote all data.
-    if data_start < data_length:
-      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
-                         (op_name, data_start, data_length))
-
-  def _ApplyZeroOperation(self, op, op_name, part_file):
-    """Applies a ZERO operation.
-
-    Args:
-      op: the operation object
-      op_name: name string for error reporting
-      part_file: the partition file object
-
-    Raises:
-      PayloadError if something goes wrong.
-    """
-    block_size = self.block_size
-    base_name = '%s.dst_extents' % op_name
-
-    # Iterate over the extents and write zero.
-    # pylint: disable=unused-variable
-    for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
-      part_file.seek(ex.start_block * block_size)
-      part_file.write(b'\0' * (ex.num_blocks * block_size))
-
-  def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
-                                new_part_file):
-    """Applies a SOURCE_COPY operation.
-
-    Args:
-      op: the operation object
-      op_name: name string for error reporting
-      old_part_file: the old partition file object
-      new_part_file: the new partition file object
-
-    Raises:
-      PayloadError if something goes wrong.
-    """
-    if not old_part_file:
-      raise PayloadError(
-          '%s: no source partition file provided for operation type (%d)' %
-          (op_name, op.type))
-
-    block_size = self.block_size
-
-    # Gather input raw data from src extents.
-    in_data = _ReadExtents(old_part_file, op.src_extents, block_size)
-
-    # Dump extracted data to dst extents.
-    _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
-                  '%s.dst_extents' % op_name)
-
-  def _BytesInExtents(self, extents, base_name):
-    """Counts the length of extents in bytes.
-
-    Args:
-      extents: The list of Extents.
-      base_name: For error reporting.
-
-    Returns:
-      The number of bytes in extents.
-    """
-
-    length = 0
-    # pylint: disable=unused-variable
-    for ex, ex_name in common.ExtentIter(extents, base_name):
-      length += ex.num_blocks * self.block_size
-    return length
-
-  def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file,
-                          new_part_file):
-    """Applies a SOURCE_BSDIFF, BROTLI_BSDIFF or PUFFDIFF operation.
-
-    Args:
-      op: the operation object
-      op_name: name string for error reporting
-      patch_data: the binary patch content
-      old_part_file: the source partition file object
-      new_part_file: the target partition file object
-
-    Raises:
-      PayloadError if something goes wrong.
-    """
-    if not old_part_file:
-      raise PayloadError(
-          '%s: no source partition file provided for operation type (%d)' %
-          (op_name, op.type))
-
-    block_size = self.block_size
-
-    # Dump patch data to file.
-    with tempfile.NamedTemporaryFile(delete=False) as patch_file:
-      patch_file_name = patch_file.name
-      patch_file.write(patch_data)
-
-    if (hasattr(new_part_file, 'fileno') and
-        ((not old_part_file) or hasattr(old_part_file, 'fileno'))):
-      # Construct input and output extents argument for bspatch.
-
-      in_extents_arg, _, _ = _ExtentsToBspatchArg(
-          op.src_extents, block_size, '%s.src_extents' % op_name,
-          data_length=op.src_length if op.src_length else
-          self._BytesInExtents(op.src_extents, '%s.src_extents' % op_name))
-      out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
-          op.dst_extents, block_size, '%s.dst_extents' % op_name,
-          data_length=op.dst_length if op.dst_length else
-          self._BytesInExtents(op.dst_extents, '%s.dst_extents' % op_name))
-
-      new_file_name = '/dev/fd/%d' % new_part_file.fileno()
-      # Diff from source partition.
-      old_file_name = '/dev/fd/%d' % old_part_file.fileno()
-
-      # In Python 3, file descriptors (fds) are not passed to child processes
-      # by default. To pass them along, we need to mark the fds as
-      # 'inheritable' and make the subprocess calls with the argument
-      # close_fds set to False.
-      if sys.version_info.major >= 3:
-        os.set_inheritable(new_part_file.fileno(), True)
-        os.set_inheritable(old_part_file.fileno(), True)
-
-      if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
-        # Invoke bspatch on partition file with extents args.
-        bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
-                       patch_file_name, in_extents_arg, out_extents_arg]
-        subprocess.check_call(bspatch_cmd, close_fds=False)
-      elif op.type == common.OpType.PUFFDIFF:
-        # Invoke puffpatch on partition file with extents args.
-        puffpatch_cmd = [self.puffpatch_path,
-                         "--operation=puffpatch",
-                         "--src_file=%s" % old_file_name,
-                         "--dst_file=%s" % new_file_name,
-                         "--patch_file=%s" % patch_file_name,
-                         "--src_extents=%s" % in_extents_arg,
-                         "--dst_extents=%s" % out_extents_arg]
-        subprocess.check_call(puffpatch_cmd, close_fds=False)
-      else:
-        raise PayloadError("Unknown operation %s" % op.type)
-
-      # Pad with zeros past the total output length.
-      if pad_len:
-        new_part_file.seek(pad_off)
-        new_part_file.write(b'\0' * pad_len)
-    else:
-      # Gather input raw data and write to a temp file.
-      input_part_file = old_part_file if old_part_file else new_part_file
-      in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
-                             max_length=op.src_length if op.src_length else
-                             self._BytesInExtents(op.src_extents,
-                                                  '%s.src_extents' % op_name))
-      with tempfile.NamedTemporaryFile(delete=False) as in_file:
-        in_file_name = in_file.name
-        in_file.write(in_data)
-
-      # Allocate temporary output file.
-      with tempfile.NamedTemporaryFile(delete=False) as out_file:
-        out_file_name = out_file.name
-
-      if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
-        # Invoke bspatch.
-        bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
-                       patch_file_name]
-        subprocess.check_call(bspatch_cmd)
-      elif op.type == common.OpType.PUFFDIFF:
-        # Invoke puffpatch.
-        puffpatch_cmd = [self.puffpatch_path,
-                         "--operation=puffpatch",
-                         "--src_file=%s" % in_file_name,
-                         "--dst_file=%s" % out_file_name,
-                         "--patch_file=%s" % patch_file_name]
-        subprocess.check_call(puffpatch_cmd)
-      else:
-        raise PayloadError("Unknown operation %s" % op.type)
-
-      # Read output.
-      with open(out_file_name, 'rb') as out_file:
-        out_data = out_file.read()
-        if len(out_data) != op.dst_length:
-          raise PayloadError(
-              '%s: actual patched data length (%d) not as expected (%d)' %
-              (op_name, len(out_data), op.dst_length))
-
-      # Write output back to partition, with padding.
-      unaligned_out_len = len(out_data) % block_size
-      if unaligned_out_len:
-        out_data += b'\0' * (block_size - unaligned_out_len)
-      _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
-                    '%s.dst_extents' % op_name)
-
-      # Delete input/output files.
-      os.remove(in_file_name)
-      os.remove(out_file_name)
-
-    # Delete patch file.
-    os.remove(patch_file_name)
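
The fd-inheritance pattern used above also works standalone; a minimal
sketch (the 'ls' child command and /dev/fd path are illustrative and
Linux-specific):

    import os
    import subprocess
    import tempfile

    with tempfile.TemporaryFile() as f:
        # Python 3 creates fds non-inheritable by default; mark this one
        # inheritable and keep fds open across exec with close_fds=False.
        os.set_inheritable(f.fileno(), True)
        subprocess.check_call(['ls', '-l', '/dev/fd/%d' % f.fileno()],
                              close_fds=False)
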
-
-  def _ApplyOperations(self, operations, base_name, old_part_file,
-                       new_part_file, part_size):
-    """Applies a sequence of update operations to a partition.
-
-    Args:
-      operations: the sequence of operations
-      base_name: the name of the operation sequence
-      old_part_file: the old partition file object, open for reading/writing
-      new_part_file: the new partition file object, open for reading/writing
-      part_size: the partition size
-
-    Raises:
-      PayloadError if anything goes wrong while processing the payload.
-    """
-    for op, op_name in common.OperationIter(operations, base_name):
-      # Read data blob.
-      data = self.payload.ReadDataBlob(op.data_offset, op.data_length)
-
-      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
-                     common.OpType.REPLACE_XZ):
-        self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
-      elif op.type == common.OpType.ZERO:
-        self._ApplyZeroOperation(op, op_name, new_part_file)
-      elif op.type == common.OpType.SOURCE_COPY:
-        self._ApplySourceCopyOperation(op, op_name, old_part_file,
-                                       new_part_file)
-      elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF,
-                       common.OpType.BROTLI_BSDIFF):
-        self._ApplyDiffOperation(op, op_name, data, old_part_file,
-                                 new_part_file)
-      else:
-        raise PayloadError('%s: unknown operation type (%d)' %
-                           (op_name, op.type))
-
-  def _ApplyToPartition(self, operations, part_name, base_name,
-                        new_part_file_name, new_part_info,
-                        old_part_file_name=None, old_part_info=None):
-    """Applies an update to a partition.
-
-    Args:
-      operations: the sequence of update operations to apply
-      part_name: the name of the partition, for error reporting
-      base_name: the name of the operation sequence
-      new_part_file_name: file name to write partition data to
-      new_part_info: size and expected hash of dest partition
-      old_part_file_name: file name of source partition (optional)
-      old_part_info: size and expected hash of source partition (optional)
-
-    Raises:
-      PayloadError if anything goes wrong with the update.
-    """
-    # Do we have a source partition?
-    if old_part_file_name:
-      # Verify the source partition.
-      with open(old_part_file_name, 'rb') as old_part_file:
-        _VerifySha256(old_part_file, old_part_info.hash,
-                      'old ' + part_name, length=old_part_info.size)
-      new_part_file_mode = 'r+b'
-      open(new_part_file_name, 'w').close()
-
-    else:
-      # We need to create/truncate the dst partition file.
-      new_part_file_mode = 'w+b'
-
-    # Apply operations.
-    with open(new_part_file_name, new_part_file_mode) as new_part_file:
-      old_part_file = (open(old_part_file_name, 'r+b')
-                       if old_part_file_name else None)
-      try:
-        self._ApplyOperations(operations, base_name, old_part_file,
-                              new_part_file, new_part_info.size)
-      finally:
-        if old_part_file:
-          old_part_file.close()
-
-      # Truncate the result, if so instructed.
-      if self.truncate_to_expected_size:
-        new_part_file.seek(0, 2)
-        if new_part_file.tell() > new_part_info.size:
-          new_part_file.seek(new_part_info.size)
-          new_part_file.truncate()
-
-    # Verify the resulting partition.
-    with open(new_part_file_name, 'rb') as new_part_file:
-      _VerifySha256(new_part_file, new_part_info.hash,
-                    'new ' + part_name, length=new_part_info.size)
-
-  def Run(self, new_parts, old_parts=None):
-    """Applier entry point, invoking all update operations.
-
-    Args:
-      new_parts: map of partition name to dest partition file
-      old_parts: map of partition name to source partition file (optional)
-
-    Raises:
-      PayloadError if payload application failed.
-    """
-    if old_parts is None:
-      old_parts = {}
-
-    self.payload.ResetFile()
-
-    new_part_info = {}
-    old_part_info = {}
-    install_operations = []
-
-    manifest = self.payload.manifest
-    for part in manifest.partitions:
-      name = part.partition_name
-      new_part_info[name] = part.new_partition_info
-      old_part_info[name] = part.old_partition_info
-      install_operations.append((name, part.operations))
-
-    part_names = set(new_part_info.keys())  # Equivalently, old_part_info.keys()
-
-    # Make sure the arguments are sane and match the payload.
-    new_part_names = set(new_parts.keys())
-    if new_part_names != part_names:
-      raise PayloadError('missing dst partition(s) %s' %
-                         ', '.join(part_names - new_part_names))
-
-    old_part_names = set(old_parts.keys())
-    if part_names - old_part_names:
-      if self.payload.IsDelta():
-        raise PayloadError('trying to apply a delta update without src '
-                           'partition(s) %s' %
-                           ', '.join(part_names - old_part_names))
-    elif old_part_names == part_names:
-      if self.payload.IsFull():
-        raise PayloadError('trying to apply a full update onto src partitions')
-    else:
-      raise PayloadError('not all src partitions provided')
-
-    for name, operations in install_operations:
-      # Apply update to partition.
-      self._ApplyToPartition(
-          operations, name, '%s_install_operations' % name, new_parts[name],
-          new_part_info[name], old_parts.get(name, None), old_part_info[name])
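
For reference, the interface removed here looked like this in use; a sketch
with placeholder partition names and paths, per the Run docstring above:

    from update_payload import payload as payload_mod
    from update_payload.applier import PayloadApplier

    p = payload_mod.Payload(open('payload.bin', 'rb'))
    p.Init()
    applier = PayloadApplier(p)
    applier.Run({'system': 'system_new.img', 'vendor': 'vendor_new.img'},
                old_parts={'system': 'system_old.img',
                           'vendor': 'vendor_old.img'})
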
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 56a9370..44b6811 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -42,7 +42,7 @@
 from update_payload import error
 from update_payload import format_utils
 from update_payload import histogram
-from update_payload import update_metadata_pb2
+import update_metadata_pb2
 
 #
 # Constants.
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
index 993b785..cf813fd 100755
--- a/scripts/update_payload/checker_unittest.py
+++ b/scripts/update_payload/checker_unittest.py
@@ -37,7 +37,7 @@
 from update_payload import checker
 from update_payload import common
 from update_payload import test_utils
-from update_payload import update_metadata_pb2
+import update_metadata_pb2
 from update_payload.error import PayloadError
 from update_payload.payload import Payload  # Avoid name conflicts later.
 
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
index 7c6ec8f..7139f6f 100644
--- a/scripts/update_payload/common.py
+++ b/scripts/update_payload/common.py
@@ -21,7 +21,7 @@
 
 import base64
 
-from update_payload import update_metadata_pb2
+import update_metadata_pb2
 from update_payload.error import PayloadError
 
 
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index 86caef7..4abd63e 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -18,6 +18,7 @@
 
 from __future__ import absolute_import
 from __future__ import print_function
+import binascii
 
 import hashlib
 import io
@@ -25,10 +26,10 @@
 import struct
 import zipfile
 
-from update_payload import applier
+import update_metadata_pb2
+
 from update_payload import checker
 from update_payload import common
-from update_payload import update_metadata_pb2
 from update_payload.error import PayloadError
 
 
@@ -123,15 +124,22 @@
       payload_file_offset: the offset of the actual payload
     """
     if zipfile.is_zipfile(payload_file):
+      self.name = payload_file
       with zipfile.ZipFile(payload_file) as zfp:
+        if "payload.bin" not in zfp.namelist():
+          raise ValueError(f"payload.bin missing in archive {payload_file}")
         self.payload_file = zfp.open("payload.bin", "r")
     elif isinstance(payload_file, str):
+      self.name = payload_file
       payload_fp = open(payload_file, "rb")
       payload_bytes = mmap.mmap(
           payload_fp.fileno(), 0, access=mmap.ACCESS_READ)
       self.payload_file = io.BytesIO(payload_bytes)
     else:
+      self.name = payload_file.name
       self.payload_file = payload_file
+    self.payload_file_size = self.payload_file.seek(0, io.SEEK_END)
+    self.payload_file.seek(0, io.SEEK_SET)
     self.payload_file_offset = payload_file_offset
     self.manifest_hasher = None
     self.is_init = False
@@ -141,6 +149,7 @@
     self.metadata_signature = None
     self.payload_signature = None
     self.metadata_size = None
+    self.Init()
 
   @property
   def is_incremental(self):
@@ -150,6 +159,20 @@
   def is_partial(self):
     return self.manifest.partial_update
 
+  @property
+  def total_data_length(self):
+    """Return the total data length of this payload, excluding payload
+    signature at the very end.
+    """
+    # Operations are sorted in ascending data_offset order, so iterating
+    # backwards and find the first one with non zero data_offset will tell
+    # us total data length
+    for partition in reversed(self.manifest.partitions):
+      for op in reversed(partition.operations):
+        if op.data_length > 0:
+          return op.data_offset + op.data_length
+    return 0
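
A sketch of how the new property composes with ReadDataBlob (assumes an
already-constructed Payload instance named payload):

    # Read the whole data section, i.e. everything up to but excluding the
    # trailing payload signature blob.
    data = payload.ReadDataBlob(0, payload.total_data_length)
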
+
   def _ReadHeader(self):
     """Reads and returns the payload header.
 
@@ -223,7 +246,7 @@
       correctly.
     """
     if self.is_init:
-      raise PayloadError('payload object already initialized')
+      return
 
     self.manifest_hasher = hashlib.sha256()
 
@@ -245,7 +268,7 @@
     self.metadata_size = self.header.size + self.header.manifest_len
     self.data_offset = self.metadata_size + self.header.metadata_signature_len
 
-    if self.manifest.signatures_offset and self.manifest.signatures_size:
+    if (self.manifest.signatures_offset and self.manifest.signatures_size and
+        self.manifest.signatures_offset + self.manifest.signatures_size <=
+        self.payload_file_size):
       payload_signature_blob = self.ReadDataBlob(
           self.manifest.signatures_offset, self.manifest.signatures_size)
       payload_signature = update_metadata_pb2.Signatures()
@@ -305,29 +328,16 @@
                part_sizes=part_sizes,
                report_out_file=report_out_file)
 
-  def Apply(self, new_parts, old_parts=None, bsdiff_in_place=True,
-            bspatch_path=None, puffpatch_path=None,
-            truncate_to_expected_size=True):
-    """Applies the update payload.
-
-    Args:
-      new_parts: map of partition name to dest partition file
-      old_parts: map of partition name to partition file (optional)
-      bsdiff_in_place: whether to perform BSDIFF operations in-place (optional)
-      bspatch_path: path to the bspatch binary (optional)
-      puffpatch_path: path to the puffpatch binary (optional)
-      truncate_to_expected_size: whether to truncate the resulting partitions
-                                 to their expected sizes, as specified in the
-                                 payload (optional)
-
-    Raises:
-      PayloadError if payload application failed.
-    """
-    self._AssertInit()
-
-    # Create a short-lived payload applier object and run it.
-    helper = applier.PayloadApplier(
-        self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path,
-        puffpatch_path=puffpatch_path,
-        truncate_to_expected_size=truncate_to_expected_size)
-    helper.Run(new_parts, old_parts=old_parts)
+  def CheckDataHash(self):
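+    """Verifies the data blob hash of every operation that carries data.
+
+    Operations with a zero data_length are skipped. For all others, the
+    blob is read back and its SHA-256 compared against data_sha256_hash.
+
+    Raises:
+      PayloadError: if a hash is missing or does not match.
+    """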
+    for part in self.manifest.partitions:
+      for op in part.operations:
+        if op.data_length == 0:
+          continue
+        if not op.data_sha256_hash:
+          raise PayloadError(
+              f"Operation {op} in partition {part.partition_name} missing data_sha256_hash")
+        blob = self.ReadDataBlob(op.data_offset, op.data_length)
+        blob_hash = hashlib.sha256(blob)
+        if blob_hash.digest() != op.data_sha256_hash:
+          raise PayloadError(
+              f"Operation {op} in partition {part.partition_name} has unexpected hash, expected: {binascii.hexlify(op.data_sha256_hash)}, actual: {blob_hash.hexdigest()}")
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py
index e153669..7005827 100644
--- a/scripts/update_payload/test_utils.py
+++ b/scripts/update_payload/test_utils.py
@@ -27,7 +27,7 @@
 
 from update_payload import common
 from update_payload import payload
-from update_payload import update_metadata_pb2
+import update_metadata_pb2
 
 
 class TestError(Exception):
diff --git a/test_http_server.cc b/test_http_server.cc
index 388949f..ba5e9ac 100644
--- a/test_http_server.cc
+++ b/test_http_server.cc
@@ -112,7 +112,7 @@
   LOG(INFO) << "URL: " << request->url;
 
   // Decode remaining lines.
-  size_t i;
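+  // '{}' value-initializes the variable so it is never read uninitialized.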
+  size_t i{};
   for (i = 1; i < lines.size(); i++) {
     terms = base::SplitString(lines[i],
                               base::kWhitespaceASCII,
@@ -184,7 +184,7 @@
                      const off_t start_offset,
                      const off_t end_offset,
                      HttpResponseCode return_code) {
-  ssize_t written = 0, ret;
+  ssize_t written = 0, ret{};
 
   ret = WriteString(fd,
                     string("HTTP/1.1 ") + Itoa(return_code) + " " +
@@ -239,7 +239,7 @@
   string line;
   line.reserve(line_len);
   char byte = first_byte;
-  size_t i;
+  size_t i{};
   for (i = 0; i < line_len; i++)
     line += byte++;
 
@@ -299,7 +299,7 @@
                   const size_t truncate_length,
                   const int sleep_every,
                   const int sleep_secs) {
-  ssize_t ret;
+  ssize_t ret{};
   size_t written = 0;
 
   // Obtain start offset, make sure it is within total payload length.
@@ -403,7 +403,7 @@
   LOG(INFO) << "Code: " << code << " " << status;
   LOG(INFO) << "New URL: " << url;
 
-  ssize_t ret;
+  ssize_t ret{};
   if ((ret = WriteString(fd, "HTTP/1.1 " + Itoa(code) + " " + status + EOL)) <
       0)
     return;
@@ -416,7 +416,7 @@
 ssize_t HandleError(int fd, const HttpRequest& request) {
   LOG(INFO) << "Generating error HTTP response";
 
-  ssize_t ret;
+  ssize_t ret{};
   size_t written = 0;
 
   const string data("This is an error page.");
@@ -444,7 +444,7 @@
   if (request.start_offset > 0 && num_fails < max_fails) {
     LOG(INFO) << "Generating error HTTP response";
 
-    ssize_t ret;
+    ssize_t ret{};
     size_t written = 0;
 
     const string data("This is an error page.");
@@ -475,7 +475,7 @@
 
 void HandleHang(int fd) {
   LOG(INFO) << "Hanging until the other side of the connection is closed.";
-  char c;
+  char c{};
   while (HANDLE_EINTR(read(fd, &c, 1)) > 0) {
   }
 }
@@ -484,7 +484,7 @@
   const off_t start_offset = request.start_offset;
   const string data("unhandled path");
   const size_t size = data.size();
-  ssize_t ret;
+  ssize_t ret{};
 
   if ((ret = WriteHeaders(fd, start_offset, size, request.return_code)) < 0)
     return;
diff --git a/update_metadata.proto b/update_metadata.proto
index 3f454ad..3881464 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -92,7 +92,6 @@
 syntax = "proto2";
 
 package chromeos_update_engine;
-option optimize_for = LITE_RUNTIME;
 
 // Data is packed into blocks on disk, always starting from the beginning
 // of the block. If a file's data is too large for one block, it overflows
@@ -331,6 +330,11 @@
   repeated string partition_names = 3;
 }
 
+message VABCFeatureSet {
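+  // Enables multi-threaded operation in the VABC writer.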
+  optional bool threaded = 1;
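+  // Enables batching of multiple writes in the VABC writer.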
+  optional bool batch_writes = 2;
+}
+
 // Metadata related to all dynamic partitions.
 message DynamicPartitionMetadata {
   // All updatable groups present in |partitions| of this DeltaArchiveManifest.
@@ -361,6 +365,9 @@
   // COW version used by VABC. This represents the major version in the COW
   // header.
   optional uint32 cow_version = 5;
+
+  // A collection of knobs to tune Virtual AB Compression.
+  optional VABCFeatureSet vabc_feature_set = 6;
 }
 
 // Definition has been duplicated from
@@ -423,4 +430,8 @@
   // Information on compressed APEX to figure out how much space is required for
   // their decompression
   repeated ApexInfo apex_info = 17;
+
+  // Security patch level of the device, usually in the format of
+  // yyyy-mm-dd
+  optional string security_patch_level = 18;
 }