Snap for 11224086 from 2d7e052d3203129b682419883495a6739bfadac6 to mainline-tzdata5-release

Change-Id: Id8b5c345358d5d04a9774f51bd82690a34385234
diff --git a/Android.bp b/Android.bp
index c3a054f..da6cec5 100644
--- a/Android.bp
+++ b/Android.bp
@@ -140,6 +140,8 @@
         ":tflite_flex_delegate",
         ":tensorflow_c_srcs",
         ":tensorflow_c_eager_srcs",
+        ":tensorflow_core_data_mobile_srcs_no_runtime",
+        ":tensorflow_core_kernels_mobile_srcs",
         ":tensorflow_core_common_runtime_mobile_srcs_only_runtime",
         ":tensorflow_core_framework_mobile_srcs_only_runtime",
         ":tensorflow_core_framework_mobile_srcs_no_runtime",
@@ -157,6 +159,14 @@
         ":tensorflow_core_lib_mobile_src_no_runtime",
         "tensorflow/core/nccl/collective_communicator.cc",
         "tensorflow/core/example/feature_util.cc",
+        "tensorflow/core/ops/no_op.cc",
+        "tensorflow/core/ops/array_ops.cc",
+        "tensorflow/core/ops/parsing_ops.cc",
+        "tensorflow/core/ops/dataset_ops.cc",
+        "tensorflow/core/ops/function_ops.cc",
+        "tensorflow/core/ops/io_ops.cc",
+        "tensorflow/core/ops/nn_ops.cc",
+        "tensorflow/core/ops/string_ops.cc",
         "tensorflow/core/lib/wav/wav_io.cc",
         "tensorflow/compiler/jit/defs.cc",
     ],
@@ -194,6 +204,8 @@
         "-DENABLE_ABSL_IN_TENSORFLOW",
         "-DIS_MOBILE_PLATFORM",
         "-DTF_ANDROID_ENABLE_LOG_EVERY_N_SECONDS",
+        // Used to support int64 and string types in //tensorflow/core/framework/register_types.h.
+        "-D__ANDROID_TYPES_FULL__",
         "-Wno-defaulted-function-deleted",
         "-Wno-deprecated-builtins",
         "-Wno-deprecated-declarations",
diff --git a/tensorflow/core/data/Android.bp b/tensorflow/core/data/Android.bp
new file mode 100644
index 0000000..c10e64f
--- /dev/null
+++ b/tensorflow/core/data/Android.bp
@@ -0,0 +1,39 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "external_tensorflow_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["external_tensorflow_license"],
+}
+
+filegroup(
+    name = "tensorflow_core_data_mobile_srcs_no_runtime",
+    srcs = [
+        "name_utils.cc",
+        "captured_function.cc",
+        "dataset_utils.cc",
+        "stats_utils.cc",
+        "metric_utils.cc",
+        "finalization_utils.cc",
+        "serialization_utils.cc",
+        "unbounded_thread_pool.cc",
+        "root_dataset.cc",
+        "rewrite_utils.cc",
+        "utils.cc",
+    ],
+)
diff --git a/tensorflow/core/data/dataset_utils.cc b/tensorflow/core/data/dataset_utils.cc
index 4ed4d4e..1d36192 100644
--- a/tensorflow/core/data/dataset_utils.cc
+++ b/tensorflow/core/data/dataset_utils.cc
@@ -42,7 +42,6 @@
 #include "tensorflow/core/lib/hash/hash.h"
 #include "tensorflow/core/lib/strings/proto_serialization.h"
 #include "tensorflow/core/platform/host_info.h"
-#include "tensorflow/core/platform/regexp.h"
 #include "tensorflow/core/util/determinism.h"
 #include "tensorflow/core/util/work_sharder.h"
 
diff --git a/tensorflow/core/kernels/Android.bp b/tensorflow/core/kernels/Android.bp
index 886cd39..062f03f 100644
--- a/tensorflow/core/kernels/Android.bp
+++ b/tensorflow/core/kernels/Android.bp
@@ -25,7 +25,35 @@
     name = "tensorflow_core_kernels_mobile_srcs",
     srcs = [
         "initializable_lookup_table.cc",
+        "checkpoint_callback_manager.cc",
+        "constant_op.cc",
         "lookup_util.cc",
+        "function_ops.cc",
+        "fill_functor.cc",
+        "example_parsing_ops.cc",
+        "no_op.cc",
         "pooling_ops_common.cc",
+        "save_restore_v2_ops.cc",
+        "save_restore_tensor.cc",
+        "save_op.cc",
+        "split_op.cc",
+        "split_lib_cpu.cc",
+        "string_to_hash_bucket_fast_op.cc",
+        "as_string_op.cc",
+        "tensor_to_hash_bucket_op.cc",
+        "bcast_ops.cc",
+        "shape_ops.cc",
+        "concat_op.cc",
+        "concat_lib_cpu.cc",
+        "relu_op.cc",
+        "data/take_dataset_op.cc",
+        "data/map_dataset_op.cc",
+        "data/reduce_dataset_op.cc",
+        "data/iterator_ops.cc",
+        "data/prefetch_dataset_op.cc",
+        "data/prefetch_autotuner.cc",
+        "data/optional_ops.cc",
+        "data/optional_ops_util.cc",
+        "data/batch_dataset_op.cc",
     ],
 )
diff --git a/tensorflow/core/kernels/checkpoint_callback_manager.cc b/tensorflow/core/kernels/checkpoint_callback_manager.cc
index fb94c19..0e0fae0 100644
--- a/tensorflow/core/kernels/checkpoint_callback_manager.cc
+++ b/tensorflow/core/kernels/checkpoint_callback_manager.cc
@@ -14,6 +14,7 @@
 ==============================================================================*/
 #include "tensorflow/core/kernels/checkpoint_callback_manager.h"
 
+#include <regex>
 #include <string>
 #include <utility>
 
@@ -24,7 +25,8 @@
 #include "tensorflow/core/platform/errors.h"
 #include "tensorflow/core/platform/mutex.h"
 #include "tensorflow/core/platform/path.h"
-#include "tensorflow/core/platform/regexp.h"
+// RE2 usage removed; std::regex is used instead.
+// #include "tensorflow/core/platform/regexp.h"
 #include "tensorflow/core/platform/status.h"
 #include "tensorflow/core/platform/statusor.h"
 #include "tensorflow/core/platform/stringpiece.h"
@@ -38,9 +40,9 @@
 
 namespace {
 
-const absl::string_view kCheckpointFileRegex = "^part-[0-9]*-of-[0-9]*$";
-const absl::string_view kCheckpointTempDirRegex = "-[0-9]*_temp$";
-const absl::string_view kCheckpointDirRegex = "-[0-9]*$";
+const char* kCheckpointFileRegex = "^part-[0-9]*-of-[0-9]*$";
+const char* kCheckpointTempDirRegex = "-[0-9]*_temp$";
+const char* kCheckpointDirRegex = "-[0-9]*$";
 const absl::string_view kCheckpointTempDirSuffix = "_temp";
 
 void TriggerSaveCallbackIfFileNotExist(absl::string_view checkpoint_id,
@@ -115,17 +117,26 @@
 CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix(
     absl::string_view prefix) {
   for (absl::string_view path = prefix;; path = io::Dirname(path)) {
-    absl::string_view basename = io::Basename(path);
+    std::string basename = std::string(io::Basename(path));
 
     // Failed to find checkpoint_id
     if (basename.empty()) break;
 
     // Skip known checkpoint file: e.g., part-00000-of-00001
-    if (RE2::PartialMatch(basename, kCheckpointFileRegex)) continue;
+    // if (RE2::PartialMatch(basename, kCheckpointFileRegex)) continue;
+    std::regex checkpoint_file_regex(kCheckpointFileRegex);
+    if (std::regex_search(basename, checkpoint_file_regex)) continue;
 
     // With _temp suffix: e.g., checkpoint-1_temp
-    if (RE2::PartialMatch(basename, kCheckpointTempDirRegex)) {
-      // Trim suffix, "_temp".
+    // if (RE2::PartialMatch(basename, kCheckpointTempDirRegex)) {
+    //   // Trim suffix, "_temp".
+    //   return std::make_pair(
+    //       std::string(basename.substr(
+    //           0, basename.length() - kCheckpointTempDirSuffix.length())),
+    //       std::string(io::Dirname(path)));
+    // }
+    std::regex checkpoint_temp_dir_regex(kCheckpointTempDirRegex);
+    if (std::regex_search(basename, checkpoint_temp_dir_regex)) {
       return std::make_pair(
           std::string(basename.substr(
               0, basename.length() - kCheckpointTempDirSuffix.length())),
@@ -133,7 +144,12 @@
     }
 
     // Without _temp suffix: e.g., checkpoint-1
-    if (RE2::PartialMatch(basename, kCheckpointDirRegex)) {
+    // if (RE2::PartialMatch(basename, kCheckpointDirRegex)) {
+    //   return std::make_pair(std::string(basename),
+    //                         std::string(io::Dirname(path)));
+    // }
+    std::regex checkpoint_dir_regex(kCheckpointDirRegex);
+    if (std::regex_search(basename, checkpoint_dir_regex)) {
       return std::make_pair(std::string(basename),
                             std::string(io::Dirname(path)));
     }
diff --git a/tensorflow/core/kernels/constant_op.cc b/tensorflow/core/kernels/constant_op.cc
index 74fe21d..8917f92 100644
--- a/tensorflow/core/kernels/constant_op.cc
+++ b/tensorflow/core/kernels/constant_op.cc
@@ -24,7 +24,6 @@
 
 #include "tensorflow/core/kernels/constant_op.h"
 
-#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
 #include "tensorflow/core/framework/allocator.h"
 #include "tensorflow/core/framework/bounds_check.h"
 #include "tensorflow/core/framework/node_def.pb.h"
diff --git a/tensorflow/core/platform/env.cc b/tensorflow/core/platform/env.cc
index 9961fff..93b9037 100644
--- a/tensorflow/core/platform/env.cc
+++ b/tensorflow/core/platform/env.cc
@@ -327,6 +327,12 @@
   return fs->HasAtomicMove(path, has_atomic_move);
 }
 
+Status Env::CanCreateTempFile(const string& fname, bool* can_create_temp_file) {
+  FileSystem* fs;
+  TF_RETURN_IF_ERROR(GetFileSystemForFile(fname, &fs));
+  return fs->CanCreateTempFile(fname, can_create_temp_file);
+}
+
 Status Env::DeleteRecursively(const string& dirname, int64_t* undeleted_files,
                               int64_t* undeleted_dirs) {
   FileSystem* fs;
diff --git a/tensorflow/core/platform/env.h b/tensorflow/core/platform/env.h
index 86b1077..67b45c6 100644
--- a/tensorflow/core/platform/env.h
+++ b/tensorflow/core/platform/env.h
@@ -333,6 +333,15 @@
   ///  TF
   Status HasAtomicMove(const std::string& path, bool* has_atomic_move);
 
+  /// Returns whether the given path is on a file system
+  /// that has the ability to create a new temp file. This can be used
+  /// to determine whether a temp location is needed to safely write objects.
+  /// If this returns false, TensorFlow will write directly to output files
+  /// instead of creating a temporary file and swapping it in. This may mean
+  /// that incomplete writes are visible to consumers.
+  Status CanCreateTempFile(const std::string& fname,
+                           bool* can_create_temp_file);
+
   /// Stores the size of `fname` in `*file_size`.
   Status GetFileSize(const std::string& fname, uint64* file_size);
 
diff --git a/tensorflow/core/platform/file_system.cc b/tensorflow/core/platform/file_system.cc
index e170b09..5847e76 100644
--- a/tensorflow/core/platform/file_system.cc
+++ b/tensorflow/core/platform/file_system.cc
@@ -87,6 +87,14 @@
   return OkStatus();
 }
 
+Status FileSystem::CanCreateTempFile(const std::string& fname,
+                                     bool* can_create_temp_file) {
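+  // By default, report that a temp file can be created. File systems that
+  // cannot should override this and report false.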
+  *can_create_temp_file = true;
+  return OkStatus();
+}
+
 void FileSystem::FlushCaches(TransactionToken* token) {}
 
 bool FileSystem::FilesExist(const std::vector<string>& files,
diff --git a/tensorflow/core/platform/file_system.h b/tensorflow/core/platform/file_system.h
index 3b74a47..be0266d 100644
--- a/tensorflow/core/platform/file_system.h
+++ b/tensorflow/core/platform/file_system.h
@@ -386,6 +386,15 @@
   ///  TF
   virtual Status HasAtomicMove(const std::string& path, bool* has_atomic_move);
 
+  /// Returns whether the given path is on a file system
+  /// that has the ability to create a new temp file. This can be used
+  /// to determine whether a temp location is needed to safely write objects.
+  /// If this returns false, TensorFlow will write directly to output files
+  /// instead of creating a temporary file and swapping it in. This may mean
+  /// that incomplete writes are visible to consumers.
+  virtual Status CanCreateTempFile(const std::string& fname,
+                                   bool* can_create_temp_file);
+
   /// \brief Flushes any cached filesystem objects from memory.
   virtual void FlushCaches() { FlushCaches(nullptr); }
 
diff --git a/tensorflow/core/platform/file_system_test.cc b/tensorflow/core/platform/file_system_test.cc
index f5ca57b..d49a252 100644
--- a/tensorflow/core/platform/file_system_test.cc
+++ b/tensorflow/core/platform/file_system_test.cc
@@ -273,6 +273,14 @@
   EXPECT_EQ(has_atomic_move, true);
 }
 
+TEST(InterPlanetaryFileSystemTest, CanCreateTempFile) {
+  InterPlanetaryFileSystem ipfs;
+  const string dirname = io::JoinPath(kPrefix, "match-00/abc/00");
+  bool can_create_temp_file;
+  TF_EXPECT_OK(ipfs.CanCreateTempFile(dirname, &can_create_temp_file));
+  EXPECT_EQ(can_create_temp_file, true);
+}
+
 // A simple file system with a root directory and a single file underneath it.
 class TestFileSystem : public NullFileSystem {
  public:
diff --git a/tensorflow/core/platform/retrying_file_system.h b/tensorflow/core/platform/retrying_file_system.h
index 1543345..ec739fb 100644
--- a/tensorflow/core/platform/retrying_file_system.h
+++ b/tensorflow/core/platform/retrying_file_system.h
@@ -144,6 +144,12 @@
     return base_file_system_->HasAtomicMove(path, has_atomic_move);
   }
 
+  Status CanCreateTempFile(const std::string& fname,
+                           bool* can_create_temp_file) override {
+    // This method does not need to be retried.
+    return base_file_system_->CanCreateTempFile(fname, can_create_temp_file);
+  }
+
   Status DeleteRecursively(const string& dirname, TransactionToken* token,
                            int64_t* undeleted_files,
                            int64_t* undeleted_dirs) override {
diff --git a/tensorflow/core/platform/test.h b/tensorflow/core/platform/test.h
index b598b6e..e49f479 100644
--- a/tensorflow/core/platform/test.h
+++ b/tensorflow/core/platform/test.h
@@ -16,10 +16,11 @@
 #ifndef TENSORFLOW_CORE_PLATFORM_TEST_H_
 #define TENSORFLOW_CORE_PLATFORM_TEST_H_
 
+#include <gtest/gtest.h>  // IWYU pragma: export
+
 #include <memory>
 #include <vector>
 
-#include <gtest/gtest.h>  // IWYU pragma: export
 #include "tensorflow/core/platform/macros.h"
 #include "tensorflow/core/platform/platform.h"
 #include "tensorflow/core/platform/types.h"
@@ -39,7 +40,8 @@
 // The advantages of using gmock matchers instead of self defined matchers are
 // better error messages, more maintainable tests and more test coverage.
 #if !defined(PLATFORM_GOOGLE) && !defined(PLATFORM_GOOGLE_ANDROID) && \
-    !defined(PLATFORM_CHROMIUMOS)
+    !defined(PLATFORM_CHROMIUMOS) &&                                  \
+    !defined(IS_MOBILE_PLATFORM)  // We have gmock.h in AOSP.
 #include <gmock/gmock-generated-matchers.h>
 #include <gmock/gmock-matchers.h>
 #include <gmock/gmock-more-matchers.h>
diff --git a/tensorflow/core/util/Android.bp b/tensorflow/core/util/Android.bp
index 87395ef..e9466a4 100644
--- a/tensorflow/core/util/Android.bp
+++ b/tensorflow/core/util/Android.bp
@@ -63,6 +63,6 @@
         "sparse/sparse_tensor.cc",
         "tensor_bundle/naming.cc",
         "tensor_bundle/tensor_bundle.cc",
-
+        "tensor_bundle/byte_swap.cc",
     ],
 )
diff --git a/tensorflow/core/util/tensor_slice_writer.cc b/tensorflow/core/util/tensor_slice_writer.cc
index 75197a5..0880b65 100644
--- a/tensorflow/core/util/tensor_slice_writer.cc
+++ b/tensorflow/core/util/tensor_slice_writer.cc
@@ -85,8 +85,19 @@
                                      CreateBuilderFunction create_builder)
     : filename_(filename),
       create_builder_(std::move(create_builder)),
-      tmpname_(strings::StrCat(filename, ".tempstate", random::New64())),
       slices_(0) {
+  Env* env = Env::Default();
+  Status status = env->CanCreateTempFile(filename_, &use_temp_file_);
+  if (!status.ok()) {
+    LOG(ERROR) << "Failed to get CanCreateTempFile attribute: " << filename_;
+    use_temp_file_ = true;
+  }
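+  // When the file system supports it, write to a temporary file and rename
+  // it into place in Finish(); otherwise write directly to the final name.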
+  data_filename_ = filename_;
+  if (use_temp_file_) {
+    data_filename_ = strings::StrCat(filename_, ".tempstate", random::New64());
+  }
   VersionDef* versions = sts_.mutable_meta()->mutable_versions();
   versions->set_producer(TF_CHECKPOINT_VERSION);
   versions->set_min_consumer(TF_CHECKPOINT_VERSION_MIN_CONSUMER);
@@ -94,7 +103,7 @@
 
 Status TensorSliceWriter::Finish() {
   Builder* b;
-  Status s = create_builder_(tmpname_, &b);
+  Status s = create_builder_(data_filename_, &b);
   if (!s.ok()) {
     delete b;
     return s;
@@ -113,18 +122,21 @@
 
   int64_t file_size;
   s = builder->Finish(&file_size);
-  // We need to rename the file to the proper name
-  if (s.ok()) {
-    s = Env::Default()->RenameFile(tmpname_, filename_);
+  // If using a temp file, we need to rename it to the proper name.
+  if (use_temp_file_) {
     if (s.ok()) {
-      VLOG(1) << "Written " << slices_ << " slices for "
-              << sts_.meta().tensor_size() << " tensors (" << file_size
-              << " bytes) to " << filename_;
+      s = Env::Default()->RenameFile(data_filename_, filename_);
+      if (s.ok()) {
+        VLOG(1) << "Written " << slices_ << " slices for "
+                << sts_.meta().tensor_size() << " tensors (" << file_size
+                << " bytes) to " << filename_;
+      } else {
+        LOG(ERROR) << "Failed to rename file " << data_filename_ << " to "
+                   << filename_;
+      }
     } else {
-      LOG(ERROR) << "Failed to rename file " << tmpname_ << " to " << filename_;
+      Env::Default()->DeleteFile(data_filename_).IgnoreError();
     }
-  } else {
-    Env::Default()->DeleteFile(tmpname_).IgnoreError();
   }
   return s;
 }
diff --git a/tensorflow/core/util/tensor_slice_writer.h b/tensorflow/core/util/tensor_slice_writer.h
index 34aa7c0..887db9d 100644
--- a/tensorflow/core/util/tensor_slice_writer.h
+++ b/tensorflow/core/util/tensor_slice_writer.h
@@ -83,7 +83,10 @@
 
   const string filename_;
   const CreateBuilderFunction create_builder_;
-  const string tmpname_;
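+  // Name of the file the tensor data is written to. If use_temp_file_ is
+  // true, this is a temporary name that Finish() renames to filename_.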
+  string data_filename_;
+  bool use_temp_file_;
 
   // A mapping from the tensor names to their index in meta_.saved_slice_meta()
   std::unordered_map<string, int> name_to_index_;
diff --git a/tensorflow/lite/delegates/Android.bp b/tensorflow/lite/delegates/Android.bp
index 8c5a76f..8409b67 100644
--- a/tensorflow/lite/delegates/Android.bp
+++ b/tensorflow/lite/delegates/Android.bp
@@ -30,6 +30,7 @@
         "flex/delegate_data.cc",
         "flex/kernel.cc",
         "flex/util.cc",
+        "flex/tflite_subgraph_execute.cc",
         "utils/simple_delegate.cc",
     ],
 )
diff --git a/tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc b/tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc
index beccac3..448e253 100644
--- a/tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc
+++ b/tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc
@@ -37,6 +37,8 @@
 
 using ::testing::ElementsAre;
 using ::testing::ElementsAreArray;
+using ::testing::FloatNear;
+using ::testing::Matcher;
 
 // TODO(b/110368244): figure out how to share the existing tests in kernels/ but
 // with the delegation on. Also, add more unit tests to improve code coverage.
@@ -51,6 +53,23 @@
   return true;
 }
 
+auto NnapiArrayFloatNear(const std::vector<float>& values,
+                         bool relaxed = false) {
+  // Uses the same tolerance as NNAPI generated tests.
+  const float atol = relaxed ? 5 * 0.0009765625f : 1e-5f;
+  const float rtol = relaxed ? 5 * 0.0009765625f : 5 * 1.1920928955078125e-7f;
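+  // Builds one FloatNear matcher per value, with per-element tolerance
+  // atol + rtol * |value|; relaxed tolerances correspond to fp16 precision.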
+
+  std::vector<Matcher<float>> matchers;
+  matchers.reserve(values.size());
+  for (const float& v : values) {
+    const float tolerance = atol + rtol * std::abs(v);
+    matchers.emplace_back(FloatNear(v, tolerance));
+  }
+  return ElementsAreArray(matchers);
+}
+
 class SingleOpModelWithNNAPI : public SingleOpModel {
  public:
   SingleOpModelWithNNAPI() { options_.disallow_nnapi_cpu = false; }
@@ -196,7 +213,7 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-1.9, 0.4, 1.0, 1.3}));
 }
 
 // Do a test with scalar input using no activation.
@@ -207,7 +224,7 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.7});
   m.PopulateTensor<float>(m.input2(), {0.1});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.3, 0.8, 0.8}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-1.9, 0.3, 0.8, 0.8}));
 }
 
 // Do a test with the NN API using no activation.
@@ -220,7 +237,8 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, -1.0, 1.0, 2.0});
   m.PopulateTensor<float>(m.input2(), {1.0, 2.0, 3.0, 4.0});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.0, 1.0, 4.0, 6.0}));
+  EXPECT_THAT(m.GetOutput(),
+              NnapiArrayFloatNear({-1.0, 1.0, 4.0, 6.0}, /*relaxed=*/true));
 }
 
 // Do a test with the NN api with relu.
@@ -231,7 +249,7 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0.0, 0.4, 1.0, 1.3}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({0.0, 0.4, 1.0, 1.3}));
 }
 
 // Verify that resize attempts succeed.
@@ -246,7 +264,8 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 0.9, 0.7});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5, 0.2, 0.8});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3, 1.1, 1.5}));
+  EXPECT_THAT(m.GetOutput(),
+              NnapiArrayFloatNear({-1.9, 0.4, 1.0, 1.3, 1.1, 1.5}));
 
   EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 2, 2, 1}), kTfLiteOk);
   EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 2, 2, 1}), kTfLiteOk);
@@ -254,7 +273,7 @@
   m.PopulateTensor<float>(m.input1(), {0.7, 0.8, 0.9, 0.7});
   m.PopulateTensor<float>(m.input2(), {0.3, 0.5, 0.2, 0.8});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({1.0, 1.3, 1.1, 1.5}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({1.0, 1.3, 1.1, 1.5}));
 }
 
 TEST(NNAPIDelegate, ResizeDynamicBatchInputTensorsWorks) {
@@ -337,7 +356,7 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-1.9, 0.4, 1.0, 1.3}));
 }
 
 // Sanity check for the state-ful NNAPI delegate with accelerator_name
@@ -354,7 +373,7 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-1.9, 0.4, 1.0, 1.3}));
 }
 
 // Sanity check for the state-ful NNAPI delegate with invalid accelerator_name
@@ -380,7 +399,7 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-1.9, 0.4, 1.0, 1.3}));
 }
 
 // Sanity check for the state-ful NNAPI delegate with compilation caching
@@ -398,7 +417,7 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-1.9, 0.4, 1.0, 1.3}));
 }
 
 // Sanity check for the state-ful NNAPI delegate with QoS hints.
@@ -416,7 +435,7 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-1.9, 0.4, 1.0, 1.3}));
 }
 
 // Sanity check for the state-ful NNAPI delegate using TfLiteBufferHandle.
@@ -480,7 +499,7 @@
   m.MarkInputTensorDataStale(m.input1());
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-1.9, 0.4, 1.0, 1.3}));
 
   // Run the inference multiple times with the same buffer so that the execution
   // can be reused.
@@ -490,7 +509,7 @@
     memcpy(input1_memory_data, input1_data, kInput1ByteSize);
     m.MarkInputTensorDataStale(m.input1());
     ASSERT_EQ(m.Invoke(), kTfLiteOk);
-    EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9 + i, 0.4, 1.0, 1.3}));
+    EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-1.9f + i, 0.4f, 1.0f, 1.3f}));
   }
 
   // Run the inference multiple times and each time register a buffer.
@@ -505,7 +524,8 @@
     m.MarkInputTensorDataStale(m.input1());
     m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
     ASSERT_EQ(m.Invoke(), kTfLiteOk);
-    EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9 + i, 0.4, 1.0, 1.3}));
+    EXPECT_THAT(m.GetOutput(),
+                NnapiArrayFloatNear({-1.9f + i, 0.4f, 1.0f, 1.3f}));
   }
 }
 
@@ -540,8 +560,7 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(),
-              ElementsAreArray(ArrayFloatNear({-0.2, 0.04, 0.21, 0.4})));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-0.2, 0.04, 0.21, 0.4}));
 }
 
 class FloatPoolingOpModel : public SingleOpModelWithNNAPI {
@@ -582,7 +601,7 @@
       3, 2, 10, 7,  //
   });
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({2.75, 5.75}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({2.75, 5.75}));
 }
 
 TEST(NNAPIDelegate, MaxPoolWithNoActivation) {
@@ -595,7 +614,7 @@
       3, 2, 10, 7,  //
   });
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({6, 10}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({6, 10}));
 }
 
 TEST(NNAPIDelegate, L2PoolWithNoActivation) {
@@ -608,7 +627,7 @@
       3, 2, 10, 7,  //
   });
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({3.5, 6.5}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({3.5, 6.5}));
 }
 
 class ConvolutionOpModel : public SingleOpModelWithNNAPI {
@@ -814,7 +833,7 @@
 
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
 
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({
                                  18, 2, 5,  // first batch, left
                                  18, 2, 5,  // first batch, right
                                  17, 4, 3,  // second batch, left
@@ -914,7 +933,7 @@
   // | 5 | 5 | 5 |
   // | 5 | 5 | 5 |
   // | 5 | 5 | 5 |
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 5, 5, 5, 5}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({5, 5, 5, 5, 5, 5, 5, 5, 5}));
 }
 
 class QuantizedConvolutionOpModel : public ConvolutionOpModel {
@@ -1200,7 +1219,7 @@
 
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
 
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({
                                  71, -34, 99, -20,  //
                                  91, -26, 127, -4,  //
                              }));
@@ -1412,10 +1431,9 @@
 
   EXPECT_THAT(
       m.GetOutput(),
-      ElementsAreArray(ArrayFloatNear(
-          {0.011656231, 0.031684921, 0.086128544, 0.234121657, 0.636408647,
-           0.636408647, 0.234121657, 0.086128544, 0.031684921, 0.011656231},
-          1e-6)));
+      NnapiArrayFloatNear({0.011656231, 0.031684921, 0.086128544, 0.234121657,
+                           0.636408647, 0.636408647, 0.234121657, 0.086128544,
+                           0.031684921, 0.011656231}));
 }
 
 TEST(SoftmaxOpTest, Beta2) {
@@ -1426,11 +1444,9 @@
 
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
 
-  EXPECT_THAT(
-      m.GetOutput(),
-      ElementsAreArray(ArrayFloatNear(
-          {0.000290076, 0.002143387, 0.015837606, 0.117024957, 0.864703974},
-          1e-6)));
+  EXPECT_THAT(m.GetOutput(),
+              NnapiArrayFloatNear({0.000290076, 0.002143387, 0.015837606,
+                                   0.117024957, 0.864703974}));
 }
 
 TEST(SoftmaxOpTest, 3dInput) {
@@ -1446,12 +1462,11 @@
 
   EXPECT_THAT(
       m.GetOutput(),
-      ElementsAreArray(ArrayFloatNear(
+      NnapiArrayFloatNear(
           {0.011656231, 0.031684921, 0.086128544, 0.234121657, 0.636408647,
            0.636408647, 0.234121657, 0.086128544, 0.031684921, 0.011656231,
            0.636408647, 0.011656231, 0.031684921, 0.086128544, 0.234121657,
-           0.011656231, 0.636408647, 0.234121657, 0.086128544, 0.031684921},
-          1e-6)));
+           0.011656231, 0.636408647, 0.234121657, 0.086128544, 0.031684921}));
 }
 
 TEST(SoftmaxOpTest, 4dInput) {
@@ -1467,12 +1482,11 @@
 
   EXPECT_THAT(
       m.GetOutput(),
-      ElementsAreArray(ArrayFloatNear(
+      NnapiArrayFloatNear(
           {0.011656231, 0.031684921, 0.086128544, 0.234121657, 0.636408647,
            0.636408647, 0.234121657, 0.086128544, 0.031684921, 0.011656231,
            0.636408647, 0.011656231, 0.031684921, 0.086128544, 0.234121657,
-           0.011656231, 0.636408647, 0.234121657, 0.086128544, 0.031684921},
-          1e-6)));
+           0.011656231, 0.636408647, 0.234121657, 0.086128544, 0.031684921}));
 }
 
 class ReshapeOpModel : public SingleOpModelWithNNAPI {
@@ -1507,7 +1521,7 @@
   ReshapeOpModel m({1, 2, 4, 1}, {2, 2, 2});
   m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({1, 2, 3, 4, 5, 6, 7, 8}));
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
 }
 
@@ -1548,9 +1562,9 @@
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({24}));
   EXPECT_THAT(
       m.GetOutput(),
-      ElementsAreArray({1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
-                        9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-                        17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}));
+      NnapiArrayFloatNear({1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                           9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                           17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}));
 }
 
 TEST(NNAPIDelegate, SqueezeWithAxisTest) {
@@ -1564,9 +1578,9 @@
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 24}));
   EXPECT_THAT(
       m.GetOutput(),
-      ElementsAreArray({1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
-                        9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-                        17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}));
+      NnapiArrayFloatNear({1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                           9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                           17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}));
 }
 
 class L2NormOpModel : public SingleOpModelWithNNAPI {
@@ -1601,7 +1615,7 @@
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 6}));
   EXPECT_THAT(m.GetOutput(),
-              ElementsAreArray({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05}));
+              NnapiArrayFloatNear({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05}));
 }
 
 class TransposeSimpleModel : public SingleOpModelWithNNAPI {
@@ -1636,9 +1650,9 @@
               12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 3}));
-  EXPECT_THAT(m.GetOutput(),
-              ElementsAreArray({0, 4, 8,  12, 16, 20, 1, 5, 9,  13, 17, 21,
-                                2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear(
+                                 {0, 4, 8,  12, 16, 20, 1, 5, 9,  13, 17, 21,
+                                  2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}));
 }
 
 class ElementwiseOpBaseModel : public SingleOpModelWithNNAPI {
@@ -1669,7 +1683,7 @@
                                          3.f, -2.f, 10.f, 1.f,  //
                                      });
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.ExtractVector<float>(m.output()), ElementsAreArray({
+  EXPECT_THAT(m.ExtractVector<float>(m.output()), NnapiArrayFloatNear({
                                                       0.f, 6.2f, 2.f, 4.f,  //
                                                       3.f, 2.f, 10.f, 1.f,  //
                                                   }));
@@ -1680,9 +1694,9 @@
   ElementwiseOpFloatModel m(BuiltinOperator_EXP, {3, 1, 2});
   m.PopulateTensor<float>(m.input(), {1.0, 0.0, -1.0, 1.0, 1.0, -1.0});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.ExtractVector<float>(m.output()),
-              ElementsAreArray(ArrayFloatNear(
-                  {2.71828, 1, 0.367879, 2.71828, 2.71828, 0.367879})));
+  EXPECT_THAT(
+      m.ExtractVector<float>(m.output()),
+      NnapiArrayFloatNear({2.71828, 1, 0.367879, 2.71828, 2.71828, 0.367879}));
   EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({3, 1, 2}));
 }
 
@@ -1691,7 +1705,7 @@
   m.PopulateTensor<float>(m.input(), {1, 3.1415926, 1, 1});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.ExtractVector<float>(m.output()),
-              ElementsAreArray(ArrayFloatNear({0, 1.14473, 0, 0})));
+              NnapiArrayFloatNear({0, 1.14473, 0, 0}));
   EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
 }
 
@@ -1700,7 +1714,7 @@
   m.PopulateTensor<float>(m.input(), {1, 2, 4, 9});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.ExtractVector<float>(m.output()),
-              ElementsAreArray(ArrayFloatNear({1, 0.7071, 0.5, 0.33333})));
+              NnapiArrayFloatNear({1, 0.7071, 0.5, 0.33333}));
   EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
 }
 
@@ -1709,7 +1723,7 @@
   m.PopulateTensor<float>(m.input(), {0, 3.1415926, -3.1415926, 1});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.ExtractVector<float>(m.output()),
-              ElementsAreArray(ArrayFloatNear({0, 0, 0, 0.84147})));
+              NnapiArrayFloatNear({0, 0, 0, 0.84147}));
   EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
 }
 
@@ -1718,7 +1732,7 @@
   m.PopulateTensor<float>(m.input(), {0, 1, 2, 4});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.ExtractVector<float>(m.output()),
-              ElementsAreArray(ArrayFloatNear({0, 1, 1.41421, 2})));
+              NnapiArrayFloatNear({0, 1, 1.41421, 2}));
   EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
 }
 
@@ -1753,8 +1767,7 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(),
-              ElementsAreArray(ArrayFloatNear({-2.1, 0.0, 0.4, 0.3})));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-2.1, 0.0, 0.4, 0.3}));
 }
 
 class FloatDivOpModel : public SingleOpModelWithNNAPI {
@@ -1788,7 +1801,7 @@
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.8, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.4, 0.2});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({-20, 1, 2, 4})));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({-20, 1, 2, 4}));
 }
 
 class BaseConcatenationOpModel : public SingleOpModelWithNNAPI {
@@ -1828,7 +1841,7 @@
                           /*num_inputs=*/1);
   m0.SetInput(0, {1.0f, 3.0f, 4.0f, 7.0f});
   ASSERT_EQ(m0.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m0.GetOutput(), ElementsAreArray({1, 3, 4, 7}));
+  EXPECT_THAT(m0.GetOutput(), NnapiArrayFloatNear({1, 3, 4, 7}));
 }
 
 TEST(NNAPIDelegate, ConcatenationFourInputs) {
@@ -1840,7 +1853,7 @@
   m0.SetInput(3, {1.3f, 3.3f, 4.3f, 7.3f});
   ASSERT_EQ(m0.Invoke(), kTfLiteOk);
   EXPECT_THAT(m0.GetOutput(),
-              ElementsAreArray({
+              NnapiArrayFloatNear({
                   1.0f, 3.0f, 1.1f, 3.1f, 1.2f, 3.2f, 1.3f, 3.3f,  //
                   4.0f, 7.0f, 4.1f, 7.1f, 4.2f, 7.2f, 4.3f, 7.3f,  //
               }));
@@ -1991,7 +2004,7 @@
   FloorOpModel model({2}, TensorType_FLOAT32);
   model.PopulateTensor<float>(model.input(), {8.5, 0.0});
   ASSERT_EQ(model.Invoke(), kTfLiteOk);
-  EXPECT_THAT(model.GetOutput(), ElementsAreArray({8, 0}));
+  EXPECT_THAT(model.GetOutput(), NnapiArrayFloatNear({8, 0}));
   EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2}));
 }
 
@@ -2011,7 +2024,7 @@
                                              });
   ASSERT_EQ(model.Invoke(), kTfLiteOk);
   EXPECT_THAT(model.GetOutput(),
-              ElementsAreArray({0, 8, 0, 9, 0, -1, -9, -1, -10, -1}));
+              NnapiArrayFloatNear({0, 8, 0, 9, 0, -1, -9, -1, -10, -1}));
   EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 1, 1, 5}));
 }
 
@@ -2046,9 +2059,8 @@
   m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   // The result is every input divided by 2.
-  EXPECT_THAT(
-      m.GetOutput(),
-      ElementsAreArray(ArrayFloatNear({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05})));
+  EXPECT_THAT(m.GetOutput(),
+              NnapiArrayFloatNear({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05}));
 }
 
 TEST(NNAPIDelegate, LocalResponseNormWithAlpha) {
@@ -2057,8 +2069,8 @@
   m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   // The result is every input divided by 3.
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
-                                 {-0.275, 0.15, 0.175, 0.3, -0.175, 0.025})));
+  EXPECT_THAT(m.GetOutput(),
+              NnapiArrayFloatNear({-0.275, 0.15, 0.175, 0.3, -0.175, 0.025}));
 }
 
 TEST(NNAPIDelegate, LocalResponseNormWithBias) {
@@ -2067,9 +2079,8 @@
   m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   // The result is every input divided by 5.
-  EXPECT_THAT(
-      m.GetOutput(),
-      ElementsAreArray(ArrayFloatNear({-0.22, 0.12, 0.14, 0.24, -0.14, 0.02})));
+  EXPECT_THAT(m.GetOutput(),
+              NnapiArrayFloatNear({-0.22, 0.12, 0.14, 0.24, -0.14, 0.02}));
 }
 
 TEST(NNAPIDelegate, LocalResponseNormSmallRadius) {
@@ -2077,10 +2088,9 @@
                              /*alpha=*/4.0, /*beta=*/0.5);
   m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(
-      m.GetOutput(),
-      ElementsAreArray(ArrayFloatNear(
-          {-0.264926, 0.125109, 0.140112, 0.267261, -0.161788, 0.0244266})));
+  EXPECT_THAT(m.GetOutput(),
+              NnapiArrayFloatNear({-0.264926, 0.125109, 0.140112, 0.267261,
+                                   -0.161788, 0.0244266}));
 }
 
 class LSHProjectionOpModel : public SingleOpModelWithNNAPI {
@@ -2256,7 +2266,7 @@
       3, -2, 10, 1,  //
   });
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({
                                  0, 0, 2, 4,   //
                                  3, 0, 10, 1,  //
                              }));
@@ -2270,7 +2280,7 @@
       0.3, -2.0, 1.1, -0.1,  //
   });
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({
                                  0.0, -0.6, 0.2, -0.4,  //
                                  0.3, -1.0, 1.0, -0.1,  //
                              }));
@@ -2284,7 +2294,7 @@
       3, -2, 10, 1,  //
   });
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({
                                  0, 0, 2, 4,  //
                                  3, 0, 6, 1,  //
                              }));
@@ -2298,10 +2308,10 @@
       3, -2, 10, 1,  //
   });
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({
                                  0.5, 0.002473, 0.880797, 0.982014,       //
                                  0.952574, 0.119203, 0.999955, 0.731059,  //
-                             })));
+                             }));
 }
 
 TEST(NNAPIDelegate, LogisticQuantized) {
@@ -2369,16 +2379,14 @@
   m.SetInput<float>({3, 6});
   m.SetSize({1, 3});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput<float>(),
-              ElementsAreArray(ArrayFloatNear({3, 5, 6})));
+  EXPECT_THAT(m.GetOutput<float>(), NnapiArrayFloatNear({3, 5, 6}));
 }
 
 TEST(ResizeBilinear, HorizontalConstant) {
   ResizeBilinearOpModel const_m({TensorType_FLOAT32, {1, 1, 2, 1}}, {1, 3});
   const_m.SetInput<float>({3, 6});
   ASSERT_EQ(const_m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(const_m.GetOutput<float>(),
-              ElementsAreArray(ArrayFloatNear({3, 5, 6})));
+  EXPECT_THAT(const_m.GetOutput<float>(), NnapiArrayFloatNear({3, 5, 6}));
 }
 
 TEST(ResizeBilinear, Vertical) {
@@ -2386,16 +2394,14 @@
   m.SetInput<float>({3, 9});
   m.SetSize({3, 1});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput<float>(),
-              ElementsAreArray(ArrayFloatNear({3, 7, 9})));
+  EXPECT_THAT(m.GetOutput<float>(), NnapiArrayFloatNear({3, 7, 9}));
 }
 
 TEST(ResizeBilinear, VerticalConstant) {
   ResizeBilinearOpModel const_m({TensorType_FLOAT32, {1, 2, 1, 1}}, {3, 1});
   const_m.SetInput<float>({3, 9});
   ASSERT_EQ(const_m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(const_m.GetOutput<float>(),
-              ElementsAreArray(ArrayFloatNear({3, 7, 9})));
+  EXPECT_THAT(const_m.GetOutput<float>(), NnapiArrayFloatNear({3, 7, 9}));
 }
 
 TEST(ResizeBilinear, TwoDimensional) {
@@ -2406,11 +2412,11 @@
   });
   m.SetSize({3, 3});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({
+  EXPECT_THAT(m.GetOutput<float>(), NnapiArrayFloatNear({
                                         3, 5, 6,    //
                                         7, 9, 10,   //
                                         9, 11, 12,  //
-                                    })));
+                                    }));
 }
 
 TEST(ResizeBilinear, TwoDimensionalConstant) {
@@ -2420,11 +2426,11 @@
       9, 12  //
   });
   ASSERT_EQ(const_m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(const_m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({
+  EXPECT_THAT(const_m.GetOutput<float>(), NnapiArrayFloatNear({
                                               3, 5, 6,    //
                                               7, 9, 10,   //
                                               9, 11, 12,  //
-                                          })));
+                                          }));
 }
 
 template <typename T>
@@ -2487,8 +2493,8 @@
   m.SetInput({1, 2, 3, 4, 5, 6});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutput(),
-              ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
-                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
+              NnapiArrayFloatNear({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
+                                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
 }
 
@@ -2538,8 +2544,8 @@
   m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 2, 1}));
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
-                                               13, 15, 6, 8, 14, 16}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({1, 3, 9, 11, 2, 4, 10, 12, 5,
+                                                  7, 13, 15, 6, 8, 14, 16}));
 }
 
 TEST(NNAPIDelegate, SpaceToBatchNDMultipleInputBatchesConstTest) {
@@ -2547,8 +2553,8 @@
   m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8, 1, 2, 1}));
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
-                                               13, 15, 6, 8, 14, 16}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({1, 3, 9, 11, 2, 4, 10, 12, 5,
+                                                  7, 13, 15, 6, 8, 14, 16}));
 }
 
 TEST(NNAPIDelegate, SpaceToBatchNDSimplePaddingConstTest) {
@@ -2556,7 +2562,7 @@
   m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({
                                  0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
                                  0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10,
                              }));
@@ -2567,7 +2573,7 @@
   m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({
                                  0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
                                  0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
                                  0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,
@@ -2623,7 +2629,7 @@
   m.SetInput({1, 2, 3, 4});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({2, 3}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({2, 3}));
 }
 
 TEST(StridedSliceOpTest, In1D_BeginMask) {
@@ -2631,7 +2637,7 @@
   m.SetInput({1, 2, 3, 4});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3}));
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({1, 2, 3}));
 }
 
 TEST(StridedSliceOpTest, In2D_Stride2) {
@@ -2640,7 +2646,7 @@
   m.SetInput({1, 2, 3, 4, 5, 6});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({1, 3}));
 }
 
 TEST(StridedSliceOpTest, In2D_EndMask) {
@@ -2649,7 +2655,7 @@
   m.SetInput({1, 2, 3, 4, 5, 6});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3}));
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({4, 5, 6}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({4, 5, 6}));
 }
 
 TEST(StridedSliceOpTest, In3D_IdentityShrinkAxis4) {
@@ -2658,7 +2664,7 @@
   m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 5, 7, 9, 11}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({1, 3, 5, 7, 9, 11}));
 }
 
 static float rnn_input[] = {
@@ -2885,7 +2891,7 @@
     expected.insert(expected.end(), golden_start, golden_end);
     expected.insert(expected.end(), golden_start, golden_end);
 
-    EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
+    EXPECT_THAT(rnn.GetOutput(), NnapiArrayFloatNear(expected));
   }
 }
 
@@ -4803,7 +4809,7 @@
   m.SetInput(data);
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
-  EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({12, 13})));
+  EXPECT_THAT(m.GetOutput<float>(), NnapiArrayFloatNear({12, 13}));
 }
 
 TEST(NNAPIDelegate, MeanFloatKeepDims) {
@@ -4815,8 +4821,7 @@
   m.SetInput(data);
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
-  EXPECT_THAT(m.GetOutput<float>(),
-              ElementsAreArray(ArrayFloatNear({10.5, 12.5, 14.5})));
+  EXPECT_THAT(m.GetOutput<float>(), NnapiArrayFloatNear({10.5, 12.5, 14.5}));
 }
 
 class BaseEmbeddingLookupOpModel : public SingleOpModelWithNNAPI {
@@ -4871,11 +4876,11 @@
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
 
   EXPECT_THAT(m.GetOutput(),
-              ElementsAreArray(ArrayFloatNear({
+              NnapiArrayFloatNear({
                   1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
                   0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
                   2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
-              })));
+              }));
 }
 
 class HashtableLookupOpModel : public SingleOpModelWithNNAPI {
@@ -4955,12 +4960,12 @@
 
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
 
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({
                                  2.0, 2.1,  // 2-nd item
                                  0, 0,      // Not found
                                  0.0, 0.1,  // 0-th item
                                  1.0, 1.1,  // 1-st item
-                             })));
+                             }));
   EXPECT_THAT(m.GetHit(), ElementsAreArray({
                               1,
                               0,
@@ -4978,12 +4983,12 @@
 
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
 
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({
                                  0.4,  // 2-nd item
                                  0,    // Not found
                                  0.0,  // 0-th item
                                  0.1,  // 1-st item
-                             })));
+                             }));
   EXPECT_THAT(m.GetHit(), ElementsAreArray({
                               1,
                               0,
@@ -5039,7 +5044,7 @@
   });
   m.SetAlpha({0.0f, 1.0f, 2.0f});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({
                                  0.0f, 0.0f, 0.0f,    // Row 1, Column 1
                                  1.0f, 1.0f, 1.0f,    // Row 1, Column 2
                                  0.0f, -1.0f, -2.0f,  // Row 2, Column 1
@@ -5150,8 +5155,8 @@
                              {TensorType_FLOAT32});
   m.SetInput({1, 2, 3, 4});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4,
-                                               0, 0, 0, 0, 0}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({0, 0, 0, 0, 0, 1, 2, 0, 0, 3,
+                                                  4, 0, 0, 0, 0, 0}));
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
 }
 
@@ -5162,8 +5167,8 @@
                              {0, 0, 1, 1, 1, 1, 0, 0}, 5, {TensorType_FLOAT32});
   m.SetInput({1, 2, 3, 4});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 1, 2, 5, 5, 3, 4,
-                                               5, 5, 5, 5, 5}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({5, 5, 5, 5, 5, 1, 2, 5, 5, 3,
+                                                  4, 5, 5, 5, 5, 5}));
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
 }
 
@@ -5174,7 +5179,7 @@
                              {0, 1, 0, 0, 0, 0, 0, 1}, 5, {TensorType_FLOAT32});
   m.SetInput({3, 3});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 5, 3, 5, 5, 5, 5, 5}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({3, 5, 3, 5, 5, 5, 5, 5}));
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 1, 2, 2}));
 }
 
@@ -5184,8 +5189,8 @@
   m.SetInput({1, 2, 3, 4});
   m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4,
-                                               0, 0, 0, 0, 0}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({0, 0, 0, 0, 0, 1, 2, 0, 0, 3,
+                                                  4, 0, 0, 0, 0, 0}));
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
 }
 
@@ -5195,8 +5200,8 @@
   m.SetInput({1, 2, 3, 4});
   m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
-  EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 1, 2, 5, 5, 3, 4,
-                                               5, 5, 5, 5, 5}));
+  EXPECT_THAT(m.GetOutput(), NnapiArrayFloatNear({5, 5, 5, 5, 5, 1, 2, 5, 5, 3,
+                                                  4, 5, 5, 5, 5, 5}));
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
 }
 
@@ -5206,8 +5211,8 @@
   m.SetInput({1, 2, 3, 4, 5, 6});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutput(),
-              ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
-                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
+              NnapiArrayFloatNear({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
+                                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
 }
 
@@ -5218,8 +5223,8 @@
   m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
   ASSERT_EQ(m.Invoke(), kTfLiteOk);
   EXPECT_THAT(m.GetOutput(),
-              ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
-                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
+              NnapiArrayFloatNear({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
+                                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
 }
 
diff --git a/third_party/absl/abseil-cpp/Android.bp b/third_party/absl/abseil-cpp/Android.bp
index 0b0ea6b..c45a840 100644
--- a/third_party/absl/abseil-cpp/Android.bp
+++ b/third_party/absl/abseil-cpp/Android.bp
@@ -30,29 +30,53 @@
     ],
 }
 
+filegroup {
+    name: "absl_srcs",
+    srcs: [
+        "absl/**/*.cc",
+    ],
+    exclude_srcs: [
+        "absl/random/**/*.cc",
+        "**/*_test.cc",
+        "**/*_test_common.cc",
+        "**/*test_util.cc",
+        "**/*test_utils.cc",
+        "**/*benchmark.cc",
+        "absl/hash/internal/print_hash_of.cc",
+    ],
+}
+
+// Only include the necessary random files here. If we include
+// "absl/random/*.cc", we will get a "duplicate symbol: main" error because
+// both gaussian_distribution_gentables.cc and randen_benchmarks.cc define a
+// main function.
+filegroup {
+    name: "absl_random",
+    srcs: [
+        "absl/random/discrete_distribution.cc",
+        "absl/random/gaussian_distribution.cc",
+        "absl/random/seed_sequences.cc",
+        "absl/random/internal/pool_urbg.cc",
+        "absl/random/internal/seed_material.cc",
+    ],
+}
+
 cc_library_static {
     name: "tensorflow_abseil",
     export_include_dirs: ["."],
     visibility: [
         "//external/tensorflow:__subpackages__",
+        "//external/federated-compute:__subpackages__",
         "//packages/modules/OnDevicePersonalization:__subpackages__",
     ],
     apex_available: [
         "com.android.ondevicepersonalization",
     ],
     srcs: [
-        "absl/**/*.cc",
+        ":absl_srcs",
+        ":absl_random",
     ],
     sdk_version: "current",
     min_sdk_version: "30",
     stl: "libc++_static",
-    exclude_srcs: [
-        "**/*_test.cc",
-        "**/*_test_common.cc",
-        "**/*test_util.cc",
-        "**/*test_utils.cc",
-        "**/*benchmark.cc",
-        "absl/random/benchmarks.cc",
-        "absl/hash/internal/print_hash_of.cc",
-    ],
 }