Remove base/metrics and some related things

Change-Id: I47706be34e135d9941bf849297bb2fc5010c88d0
Reviewed-on: https://gn-review.googlesource.com/1420
Commit-Queue: Scott Graham <scottmg@chromium.org>
Reviewed-by: Brett Wilson <brettw@chromium.org>
diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc
deleted file mode 100644
index bb1349c..0000000
--- a/base/debug/activity_tracker.cc
+++ /dev/null
@@ -1,1828 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/debug/activity_tracker.h"
-
-#include <algorithm>
-#include <limits>
-#include <utility>
-
-#include "base/atomic_sequence_num.h"
-#include "base/debug/stack_trace.h"
-#include "base/files/file.h"
-#include "base/files/file_path.h"
-#include "base/files/memory_mapped_file.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/metrics/field_trial.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/pending_task.h"
-#include "base/pickle.h"
-#include "base/process/process.h"
-#include "base/process/process_handle.h"
-#include "base/stl_util.h"
-#include "base/strings/string_util.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/threading/platform_thread.h"
-#include "build_config.h"
-
-namespace base {
-namespace debug {
-
-namespace {
-
-// The minimum depth a stack should support.
-const int kMinStackDepth = 2;
-
-// The amount of memory set aside for holding arbitrary user data (key/value
-// pairs) globally or associated with ActivityData entries.
-const size_t kUserDataSize = 1 << 10;     // 1 KiB
-const size_t kProcessDataSize = 4 << 10;  // 4 KiB
-const size_t kMaxUserDataNameLength =
-    static_cast<size_t>(std::numeric_limits<uint8_t>::max());
-
-// A constant used to indicate that module information is changing.
-const uint32_t kModuleInformationChanging = 0x80000000;
-
-// The key used to record process information.
-const char kProcessPhaseDataKey[] = "process-phase";
-
-// An atomically incrementing number, used to check for recreations of objects
-// in the same memory space.
-AtomicSequenceNumber g_next_id;
-
-union ThreadRef {
-  int64_t as_id;
-#if defined(OS_WIN)
-  // On Windows, the handle itself is often a pseudo-handle with a common
-  // value meaning "this thread" and so the thread-id is used. The former
-  // can be converted to a thread-id with a system call.
-  PlatformThreadId as_tid;
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
-  // On Posix and Fuchsia, the handle is always a unique identifier so no
-  // conversion needs to be done. However, its value is officially opaque so
-  // there is no one correct way to convert it to a numerical identifier.
-  PlatformThreadHandle::Handle as_handle;
-#endif
-};
-
-// Gets the next non-zero identifier. It is only unique within a process.
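-// Zero is skipped because a zero |data_id| marks an uninitialized
-// OwningProcess record.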
-uint32_t GetNextDataId() {
-  uint32_t id;
-  while ((id = g_next_id.GetNext()) == 0)
-    ;
-  return id;
-}
-
-// Gets the current process-id, either from the GlobalActivityTracker if it
-// exists (where the PID can be defined for testing) or from the system if
-// there isn't such.
-int64_t GetProcessId() {
-  GlobalActivityTracker* global = GlobalActivityTracker::Get();
-  if (global)
-    return global->process_id();
-  return GetCurrentProcId();
-}
-
-// Finds and reuses a specific allocation or creates a new one.
-PersistentMemoryAllocator::Reference AllocateFrom(
-    PersistentMemoryAllocator* allocator,
-    uint32_t from_type,
-    size_t size,
-    uint32_t to_type) {
-  PersistentMemoryAllocator::Iterator iter(allocator);
-  PersistentMemoryAllocator::Reference ref;
-  while ((ref = iter.GetNextOfType(from_type)) != 0) {
-    DCHECK_LE(size, allocator->GetAllocSize(ref));
-    // This can fail if another thread has just taken it. It is assumed that
-    // the memory is cleared during the "free" operation.
-    if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
-      return ref;
-  }
-
-  return allocator->Allocate(size, to_type);
-}
-
-// Determines the previous aligned index.
-size_t RoundDownToAlignment(size_t index, size_t alignment) {
-  return index & (0 - alignment);
-}
-
-// Determines the next aligned index.
-size_t RoundUpToAlignment(size_t index, size_t alignment) {
-  return (index + (alignment - 1)) & (0 - alignment);
-}
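-
-// Both helpers assume |alignment| is a power of two, in which case
-// 0 - alignment is the same mask as ~(alignment - 1). For example, with an
-// alignment of 8: RoundDownToAlignment(13, 8) == 13 & ~7 == 8 and
-// RoundUpToAlignment(13, 8) == (13 + 7) & ~7 == 16.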
-
-// Converts "tick" timing into wall time.
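-// Both Time and TimeTicks store their internal values in microseconds, so
-// the tick delta can be added directly to the wall-clock start time.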
-Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) {
-  return time_start + TimeDelta::FromInternalValue(ticks - ticks_start);
-}
-
-}  // namespace
-
-OwningProcess::OwningProcess() = default;
-OwningProcess::~OwningProcess() = default;
-
-void OwningProcess::Release_Initialize(int64_t pid) {
-  uint32_t old_id = data_id.load(std::memory_order_acquire);
-  DCHECK_EQ(0U, old_id);
-  process_id = pid != 0 ? pid : GetProcessId();
-  create_stamp = Time::Now().ToInternalValue();
-  data_id.store(GetNextDataId(), std::memory_order_release);
-}
-
-void OwningProcess::SetOwningProcessIdForTesting(int64_t pid, int64_t stamp) {
-  DCHECK_NE(0U, data_id);
-  process_id = pid;
-  create_stamp = stamp;
-}
-
-// static
-bool OwningProcess::GetOwningProcessId(const void* memory,
-                                       int64_t* out_id,
-                                       int64_t* out_stamp) {
-  const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
-  uint32_t id = info->data_id.load(std::memory_order_acquire);
-  if (id == 0)
-    return false;
-
-  *out_id = info->process_id;
-  *out_stamp = info->create_stamp;
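-  // Re-read |data_id| after copying the values; if it changed, the record
-  // was re-initialized mid-read and the copied values can't be trusted.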
-  return id == info->data_id.load(std::memory_order_seq_cst);
-}
-
-// It doesn't matter what is contained in this (though it will be all zeros)
-// as only its address is important.
-const ActivityData kNullActivityData = {};
-
-ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
-  ThreadRef thread_ref;
-  thread_ref.as_id = 0;  // Zero the union in case the other member is smaller.
-#if defined(OS_WIN)
-  thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
-  thread_ref.as_handle = handle.platform_handle();
-#endif
-  return ForThread(thread_ref.as_id);
-}
-
-ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
-    PersistentMemoryAllocator* allocator,
-    uint32_t object_type,
-    uint32_t object_free_type,
-    size_t object_size,
-    size_t cache_size,
-    bool make_iterable)
-    : allocator_(allocator),
-      object_type_(object_type),
-      object_free_type_(object_free_type),
-      object_size_(object_size),
-      cache_size_(cache_size),
-      make_iterable_(make_iterable),
-      iterator_(allocator),
-      cache_values_(new Reference[cache_size]),
-      cache_used_(0) {
-  DCHECK(allocator);
-}
-
-ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() = default;
-
-ActivityTrackerMemoryAllocator::Reference
-ActivityTrackerMemoryAllocator::GetObjectReference() {
-  // First see if there is a cached value that can be returned. This is much
-  // faster than searching the memory system for free blocks.
-  while (cache_used_ > 0) {
-    Reference cached = cache_values_[--cache_used_];
-    // Change the type of the cached object to the proper type and return it.
-    // If the type-change fails that means another thread has taken this from
-    // under us (via the search below) so ignore it and keep trying. Don't
-    // clear the memory because that was done when the type was made "free".
-    if (allocator_->ChangeType(cached, object_type_, object_free_type_, false))
-      return cached;
-  }
-
-  // Fetch the next "free" object from persistent memory. Rather than restart
-  // the iterator at the head each time and likely waste time going again
-  // through objects that aren't relevant, the iterator continues from where
-  // it last left off and is only reset when the end is reached. If the
-  // returned reference matches |last|, then it has wrapped without finding
-  // anything.
-  const Reference last = iterator_.GetLast();
-  while (true) {
-    uint32_t type;
-    Reference found = iterator_.GetNext(&type);
-    if (found && type == object_free_type_) {
-      // Found a free object. Change it to the proper type and return it. If
-      // the type-change fails that means another thread has taken this from
-      // under us so ignore it and keep trying.
-      if (allocator_->ChangeType(found, object_type_, object_free_type_, false))
-        return found;
-    }
-    if (found == last) {
-      // Wrapped. No desired object was found.
-      break;
-    }
-    if (!found) {
-      // Reached end; start over at the beginning.
-      iterator_.Reset();
-    }
-  }
-
-  // No free block was found so instead allocate a new one.
-  Reference allocated = allocator_->Allocate(object_size_, object_type_);
-  if (allocated && make_iterable_)
-    allocator_->MakeIterable(allocated);
-  return allocated;
-}
-
-void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
-  // Mark object as free.
-  bool success = allocator_->ChangeType(ref, object_free_type_, object_type_,
-                                        /*clear=*/true);
-  DCHECK(success);
-
-  // Add this reference to our "free" cache if there is space. If not, the type
-  // has still been changed to indicate that it is free so this (or another)
-  // thread can find it, albeit more slowly, using the iteration method above.
-  if (cache_used_ < cache_size_)
-    cache_values_[cache_used_++] = ref;
-}
-
-// static
-void Activity::FillFrom(Activity* activity,
-                        const void* program_counter,
-                        const void* origin,
-                        Type type,
-                        const ActivityData& data) {
-  activity->time_internal = base::TimeTicks::Now().ToInternalValue();
-  activity->calling_address = reinterpret_cast<uintptr_t>(program_counter);
-  activity->origin_address = reinterpret_cast<uintptr_t>(origin);
-  activity->activity_type = type;
-  activity->data = data;
-
-#if (!defined(OS_NACL) && DCHECK_IS_ON()) || defined(ADDRESS_SANITIZER)
-  // Create a stacktrace from the current location and get the addresses for
-  // improved debuggability.
-  StackTrace stack_trace;
-  size_t stack_depth;
-  const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
-  // Copy the stack addresses, ignoring the first one (here).
-  size_t i;
-  for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) {
-    activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]);
-  }
-  activity->call_stack[i - 1] = 0;
-#else
-  activity->call_stack[0] = 0;
-#endif
-}
-
-ActivityUserData::TypedValue::TypedValue() = default;
-ActivityUserData::TypedValue::TypedValue(const TypedValue& other) = default;
-ActivityUserData::TypedValue::~TypedValue() = default;
-
-StringPiece ActivityUserData::TypedValue::Get() const {
-  DCHECK_EQ(RAW_VALUE, type_);
-  return long_value_;
-}
-
-StringPiece ActivityUserData::TypedValue::GetString() const {
-  DCHECK_EQ(STRING_VALUE, type_);
-  return long_value_;
-}
-
-bool ActivityUserData::TypedValue::GetBool() const {
-  DCHECK_EQ(BOOL_VALUE, type_);
-  return short_value_ != 0;
-}
-
-char ActivityUserData::TypedValue::GetChar() const {
-  DCHECK_EQ(CHAR_VALUE, type_);
-  return static_cast<char>(short_value_);
-}
-
-int64_t ActivityUserData::TypedValue::GetInt() const {
-  DCHECK_EQ(SIGNED_VALUE, type_);
-  return static_cast<int64_t>(short_value_);
-}
-
-uint64_t ActivityUserData::TypedValue::GetUint() const {
-  DCHECK_EQ(UNSIGNED_VALUE, type_);
-  return static_cast<uint64_t>(short_value_);
-}
-
-StringPiece ActivityUserData::TypedValue::GetReference() const {
-  DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
-  return ref_value_;
-}
-
-StringPiece ActivityUserData::TypedValue::GetStringReference() const {
-  DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
-  return ref_value_;
-}
-
-// These are required because std::atomic is (currently) not a POD type and
-// thus clang requires explicit out-of-line constructors and destructors even
-// when they do nothing.
-ActivityUserData::ValueInfo::ValueInfo() = default;
-ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
-ActivityUserData::ValueInfo::~ValueInfo() = default;
-ActivityUserData::MemoryHeader::MemoryHeader() = default;
-ActivityUserData::MemoryHeader::~MemoryHeader() = default;
-ActivityUserData::FieldHeader::FieldHeader() = default;
-ActivityUserData::FieldHeader::~FieldHeader() = default;
-
-ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0, -1) {}
-
-ActivityUserData::ActivityUserData(void* memory, size_t size, int64_t pid)
-    : memory_(reinterpret_cast<char*>(memory)),
-      available_(RoundDownToAlignment(size, kMemoryAlignment)),
-      header_(reinterpret_cast<MemoryHeader*>(memory)),
-      orig_data_id(0),
-      orig_process_id(0),
-      orig_create_stamp(0) {
-  // It's possible that no user data is being stored.
-  if (!memory_)
-    return;
-
-  static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
-  DCHECK_LT(sizeof(MemoryHeader), available_);
-  if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
-    header_->owner.Release_Initialize(pid);
-  memory_ += sizeof(MemoryHeader);
-  available_ -= sizeof(MemoryHeader);
-
-  // Make a copy of identifying information for later comparison.
-  *const_cast<uint32_t*>(&orig_data_id) =
-      header_->owner.data_id.load(std::memory_order_acquire);
-  *const_cast<int64_t*>(&orig_process_id) = header_->owner.process_id;
-  *const_cast<int64_t*>(&orig_create_stamp) = header_->owner.create_stamp;
-
-  // If there is already data present, load that. This allows the same class
-  // to be used for analysis through snapshots.
-  ImportExistingData();
-}
-
-ActivityUserData::~ActivityUserData() = default;
-
-bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
-  DCHECK(output_snapshot);
-  DCHECK(output_snapshot->empty());
-
-  // Find any new data that may have been added by an active instance of this
-  // class that is adding records.
-  ImportExistingData();
-
-  // Add all the values to the snapshot.
-  for (const auto& entry : values_) {
-    TypedValue value;
-    const size_t size = entry.second.size_ptr->load(std::memory_order_acquire);
-    value.type_ = entry.second.type;
-    DCHECK_GE(entry.second.extent, size);
-
-    switch (entry.second.type) {
-      case RAW_VALUE:
-      case STRING_VALUE:
-        value.long_value_ =
-            std::string(reinterpret_cast<char*>(entry.second.memory), size);
-        break;
-      case RAW_VALUE_REFERENCE:
-      case STRING_VALUE_REFERENCE: {
-        ReferenceRecord* ref =
-            reinterpret_cast<ReferenceRecord*>(entry.second.memory);
-        value.ref_value_ = StringPiece(
-            reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
-            static_cast<size_t>(ref->size));
-      } break;
-      case BOOL_VALUE:
-      case CHAR_VALUE:
-        value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
-        break;
-      case SIGNED_VALUE:
-      case UNSIGNED_VALUE:
-        value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
-        break;
-      case END_OF_VALUES:  // Included for completeness purposes.
-        NOTREACHED();
-    }
-    auto inserted = output_snapshot->insert(
-        std::make_pair(entry.second.name.as_string(), std::move(value)));
-    DCHECK(inserted.second);  // True if inserted, false if existed.
-  }
-
-  // Another import attempt will validate that the underlying memory has not
-  // been reused for another purpose. Entries added since the first import
-  // will be ignored here but will be returned if another snapshot is created.
-  ImportExistingData();
-  if (!memory_) {
-    output_snapshot->clear();
-    return false;
-  }
-
-  // Successful snapshot.
-  return true;
-}
-
-const void* ActivityUserData::GetBaseAddress() const {
-  // The |memory_| pointer advances as elements are written but the |header_|
-  // value is always at the start of the block so just return that.
-  return header_;
-}
-
-void ActivityUserData::SetOwningProcessIdForTesting(int64_t pid,
-                                                    int64_t stamp) {
-  if (!header_)
-    return;
-  header_->owner.SetOwningProcessIdForTesting(pid, stamp);
-}
-
-// static
-bool ActivityUserData::GetOwningProcessId(const void* memory,
-                                          int64_t* out_id,
-                                          int64_t* out_stamp) {
-  const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
-  return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
-}
-
-void ActivityUserData::Set(StringPiece name,
-                           ValueType type,
-                           const void* memory,
-                           size_t size) {
-  DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
-  size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
-                  size);
-
-  // It's possible that no user data is being stored.
-  if (!memory_)
-    return;
-
-  // The storage of a name is limited so use that limit during lookup.
-  if (name.length() > kMaxUserDataNameLength)
-    name.set(name.data(), kMaxUserDataNameLength);
-
-  ValueInfo* info;
-  auto existing = values_.find(name);
-  if (existing != values_.end()) {
-    info = &existing->second;
-  } else {
-    // The name size is limited to what can be held in a single byte but
-    // because there are not alignment constraints on strings, it's set tight
-    // against the header. Its extent (the reserved space, even if it's not
-    // all used) is calculated so that, when pressed against the header, the
-    // following field will be aligned properly.
-    size_t name_size = name.length();
-    size_t name_extent =
-        RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
-        sizeof(FieldHeader);
-    size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
-
-    // The "base size" is the size of the header and (padded) string key. Stop
-    // now if there's not room enough for even this.
-    size_t base_size = sizeof(FieldHeader) + name_extent;
-    if (base_size > available_)
-      return;
-
-    // The "full size" is the size for storing the entire value.
-    size_t full_size = std::min(base_size + value_extent, available_);
-
-    // If the value is actually a single byte, see if it can be stuffed at the
-    // end of the name extent rather than wasting kMemoryAlignment bytes.
-    if (size == 1 && name_extent > name_size) {
-      full_size = base_size;
-      --name_extent;
-      --base_size;
-    }
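-    // E.g., assuming an 8-byte FieldHeader and kMemoryAlignment == 8: a
-    // 5-byte name gets a name_extent of 8 (3 bytes of padding), so a 1-byte
-    // value is stored in the final padding byte and the record stays at 16
-    // bytes instead of growing by another 8-byte value_extent.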
-
-    // Truncate the stored size to the amount of available memory. Stop now if
-    // there's not any room for even part of the value.
-    if (size != 0) {
-      size = std::min(full_size - base_size, size);
-      if (size == 0)
-        return;
-    }
-
-    // Allocate a chunk of memory.
-    FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
-    memory_ += full_size;
-    available_ -= full_size;
-
-    // Datafill the header and name records. Memory must be zeroed. The |type|
-    // is written last, atomically, to release all the other values.
-    DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
-    DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
-    header->name_size = static_cast<uint8_t>(name_size);
-    header->record_size = full_size;
-    char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
-    void* value_memory =
-        reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
-    memcpy(name_memory, name.data(), name_size);
-    header->type.store(type, std::memory_order_release);
-
-    // Create an entry in |values_| so that this field can be found and changed
-    // later on without having to allocate new entries.
-    StringPiece persistent_name(name_memory, name_size);
-    auto inserted =
-        values_.insert(std::make_pair(persistent_name, ValueInfo()));
-    DCHECK(inserted.second);  // True if inserted, false if existed.
-    info = &inserted.first->second;
-    info->name = persistent_name;
-    info->memory = value_memory;
-    info->size_ptr = &header->value_size;
-    info->extent = full_size - sizeof(FieldHeader) - name_extent;
-    info->type = type;
-  }
-
-  // Copy the value data to storage. The |size| is written last, atomically, to
-  // release the copied data. Until then, a parallel reader will just ignore
-  // records with a zero size.
-  DCHECK_EQ(type, info->type);
-  size = std::min(size, info->extent);
-  info->size_ptr->store(0, std::memory_order_seq_cst);
-  memcpy(info->memory, memory, size);
-  info->size_ptr->store(size, std::memory_order_release);
-}
-
-void ActivityUserData::SetReference(StringPiece name,
-                                    ValueType type,
-                                    const void* memory,
-                                    size_t size) {
-  ReferenceRecord rec;
-  rec.address = reinterpret_cast<uintptr_t>(memory);
-  rec.size = size;
-  Set(name, type, &rec, sizeof(rec));
-}
-
-void ActivityUserData::ImportExistingData() const {
-  // It's possible that no user data is being stored.
-  if (!memory_)
-    return;
-
-  while (available_ > sizeof(FieldHeader)) {
-    FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
-    ValueType type =
-        static_cast<ValueType>(header->type.load(std::memory_order_acquire));
-    if (type == END_OF_VALUES)
-      return;
-    if (header->record_size > available_)
-      return;
-
-    size_t value_offset = RoundUpToAlignment(
-        sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
-    if (header->record_size == value_offset &&
-        header->value_size.load(std::memory_order_relaxed) == 1) {
-      value_offset -= 1;
-    }
-    if (value_offset + header->value_size > header->record_size)
-      return;
-
-    ValueInfo info;
-    info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
-    info.type = type;
-    info.memory = memory_ + value_offset;
-    info.size_ptr = &header->value_size;
-    info.extent = header->record_size - value_offset;
-
-    StringPiece key(info.name);
-    values_.insert(std::make_pair(key, std::move(info)));
-
-    memory_ += header->record_size;
-    available_ -= header->record_size;
-  }
-
-  // Check if memory has been completely reused.
-  if (header_->owner.data_id.load(std::memory_order_acquire) != orig_data_id ||
-      header_->owner.process_id != orig_process_id ||
-      header_->owner.create_stamp != orig_create_stamp) {
-    memory_ = nullptr;
-    values_.clear();
-  }
-}
-
-// This information is kept for every thread that is tracked. It is filled
-// the very first time the thread is seen. All fields must be of exact sizes
-// so there is no issue moving between 32- and 64-bit builds.
-struct ThreadActivityTracker::Header {
-  // Defined in .h for analyzer access. Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId =
-      GlobalActivityTracker::kTypeIdActivityTracker;
-
-  // Expected size for 32/64-bit check.
-  static constexpr size_t kExpectedInstanceSize =
-      OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize +
-      72;
-
-  // This information uniquely identifies a process.
-  OwningProcess owner;
-
-  // The thread-id (thread_ref.as_id) to which this data belongs. This number
-  // is not guaranteed to mean anything but combined with the process-id from
-  // OwningProcess is unique among all active trackers.
-  ThreadRef thread_ref;
-
-  // The start-time and start-ticks when the data was created. Each activity
-  // record has a |time_internal| value that can be converted to a "wall time"
-  // with these two values.
-  int64_t start_time;
-  int64_t start_ticks;
-
-  // The number of Activity slots (spaces that can hold an Activity) that
-  // immediately follow this structure in memory.
-  uint32_t stack_slots;
-
-  // Some padding to keep everything 64-bit aligned.
-  uint32_t padding;
-
-  // The current depth of the stack. This may be greater than the number of
-  // slots. If the depth exceeds the number of slots, the newest entries
-  // won't be recorded.
-  std::atomic<uint32_t> current_depth;
-
-  // A memory location used to indicate if changes have been made to the data
-  // that would invalidate an in-progress read of its contents. The active
-  // tracker will increment the value whenever something gets popped from the
-  // stack. A monitoring tracker can check the value before and after access
-  // to know, if it's still the same, that the contents didn't change while
-  // being copied.
-  std::atomic<uint32_t> data_version;
-
-  // The last "exception" activity. This can't be stored on the stack because
-  // that could get popped as things unwind.
-  Activity last_exception;
-
-  // The name of the thread (up to a maximum length). Dynamic-length names
-  // are not practical since the memory has to come from the same persistent
-  // allocator that holds this structure and to which this object has no
-  // reference.
-  char thread_name[32];
-};
-
-ThreadActivityTracker::Snapshot::Snapshot() = default;
-ThreadActivityTracker::Snapshot::~Snapshot() = default;
-
-ThreadActivityTracker::ScopedActivity::ScopedActivity(
-    ThreadActivityTracker* tracker,
-    const void* program_counter,
-    const void* origin,
-    Activity::Type type,
-    const ActivityData& data)
-    : tracker_(tracker) {
-  if (tracker_)
-    activity_id_ = tracker_->PushActivity(program_counter, origin, type, data);
-}
-
-ThreadActivityTracker::ScopedActivity::~ScopedActivity() {
-  if (tracker_)
-    tracker_->PopActivity(activity_id_);
-}
-
-void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
-    Activity::Type type,
-    const ActivityData& data) {
-  if (tracker_)
-    tracker_->ChangeActivity(activity_id_, type, data);
-}
-
-ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
-    : header_(static_cast<Header*>(base)),
-      stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
-                                         sizeof(Header))),
-#if DCHECK_IS_ON()
-      thread_id_(PlatformThreadRef()),
-#endif
-      stack_slots_(
-          static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
-
-  // Verify the parameters but fail gracefully if they're not valid so that
-  // production code based on external inputs will not crash.  IsValid() will
-  // return false in this case.
-  if (!base ||
-      // Ensure there is enough space for the header and at least a few records.
-      size < sizeof(Header) + kMinStackDepth * sizeof(Activity) ||
-      // Ensure that the |stack_slots_| calculation didn't overflow.
-      (size - sizeof(Header)) / sizeof(Activity) >
-          std::numeric_limits<uint32_t>::max()) {
-    NOTREACHED();
-    return;
-  }
-
-  // Ensure that the thread reference doesn't exceed the size of the ID number.
-  // This won't compile at the global scope because Header is a private struct.
-  static_assert(
-      sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
-      "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
-
-  // Ensure that Activity.data is aligned to a 64-bit boundary so there are
-  // no interoperability issues across CPU architectures.
-  static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
-                "ActivityData.data is not 64-bit aligned");
-
-  // Provided memory should either be completely initialized or all zeros.
-  if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
-    // This is a new file. Double-check other fields and then initialize.
-    DCHECK_EQ(0, header_->owner.process_id);
-    DCHECK_EQ(0, header_->owner.create_stamp);
-    DCHECK_EQ(0, header_->thread_ref.as_id);
-    DCHECK_EQ(0, header_->start_time);
-    DCHECK_EQ(0, header_->start_ticks);
-    DCHECK_EQ(0U, header_->stack_slots);
-    DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
-    DCHECK_EQ(0U, header_->data_version.load(std::memory_order_relaxed));
-    DCHECK_EQ(0, stack_[0].time_internal);
-    DCHECK_EQ(0U, stack_[0].origin_address);
-    DCHECK_EQ(0U, stack_[0].call_stack[0]);
-    DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
-
-#if defined(OS_WIN)
-    header_->thread_ref.as_tid = PlatformThread::CurrentId();
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
-    header_->thread_ref.as_handle =
-        PlatformThread::CurrentHandle().platform_handle();
-#endif
-
-    header_->start_time = base::Time::Now().ToInternalValue();
-    header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
-    header_->stack_slots = stack_slots_;
-    strlcpy(header_->thread_name, PlatformThread::GetName(),
-            sizeof(header_->thread_name));
-
-    // This is done last so as to guarantee that everything above is "released"
-    // by the time this value gets written.
-    header_->owner.Release_Initialize();
-
-    valid_ = true;
-    DCHECK(IsValid());
-  } else {
-    // This is a file with existing data. Perform basic consistency checks.
-    valid_ = true;
-    valid_ = IsValid();
-  }
-}
-
-ThreadActivityTracker::~ThreadActivityTracker() = default;
-
-ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
-    const void* program_counter,
-    const void* origin,
-    Activity::Type type,
-    const ActivityData& data) {
-  // A thread-checker creates a lock to check the thread-id, which would mean
-  // re-entering this code if lock acquisitions are being tracked.
-  DCHECK(type == Activity::ACT_LOCK_ACQUIRE || CalledOnValidThread());
-
-  // Get the current depth of the stack. No access to other memory guarded
-  // by this variable is done here so a "relaxed" load is acceptable.
-  uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
-
-  // Handle the case where the stack depth has exceeded the storage capacity.
-  // Extra entries will be lost leaving only the base of the stack.
-  if (depth >= stack_slots_) {
-    // Since no other threads modify the data, no compare/exchange is needed.
-    // Since no other memory is being modified, a "relaxed" store is acceptable.
-    header_->current_depth.store(depth + 1, std::memory_order_relaxed);
-    return depth;
-  }
-
-  // Get a pointer to the next activity and load it. No atomicity is required
-  // here because the memory is known only to this thread. It will be made
-  // known to other threads once the depth is incremented.
-  Activity::FillFrom(&stack_[depth], program_counter, origin, type, data);
-
-  // Save the incremented depth. Because this guards |activity| memory filled
-  // above that may be read by another thread once the recorded depth changes,
-  // a "release" store is required.
-  header_->current_depth.store(depth + 1, std::memory_order_release);
-
-  // The current depth is used as the activity ID because it simply identifies
-  // an entry. Once an entry is popped, it's okay to reuse the ID.
-  return depth;
-}
-
-void ThreadActivityTracker::ChangeActivity(ActivityId id,
-                                           Activity::Type type,
-                                           const ActivityData& data) {
-  DCHECK(CalledOnValidThread());
-  DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
-  DCHECK_LT(id, header_->current_depth.load(std::memory_order_acquire));
-
-  // Update the information if it is being recorded (i.e. within slot limit).
-  if (id < stack_slots_) {
-    Activity* activity = &stack_[id];
-
-    if (type != Activity::ACT_NULL) {
-      DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
-                type & Activity::ACT_CATEGORY_MASK);
-      activity->activity_type = type;
-    }
-
-    if (&data != &kNullActivityData)
-      activity->data = data;
-  }
-}
-
-void ThreadActivityTracker::PopActivity(ActivityId id) {
-  // Do an atomic decrement of the depth. No changes to stack entries guarded
-  // by this variable are done here so a "relaxed" operation is acceptable.
-  // |depth| will receive the value BEFORE it was modified which means the
-  // return value must also be decremented. The slot will be "free" after
-  // this call but since only a single thread can access this object, the
-  // data will remain valid until this method returns or calls outside.
-  uint32_t depth =
-      header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1;
-
-  // Validate that everything is running correctly.
-  DCHECK_EQ(id, depth);
-
-  // A thread-checker creates a lock to check the thread-id, which would mean
-  // re-entering this code if lock acquisitions are being tracked.
-  DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
-         CalledOnValidThread());
-
-  // The stack has shrunk meaning that some other thread trying to copy the
-  // contents for reporting purposes could get bad data. Increment the data
-  // version so that it can tell that things have changed. This needs to
-  // happen after the atomic |depth| operation above so a "release" store
-  // is required.
-  header_->data_version.fetch_add(1, std::memory_order_release);
-}
-
-std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
-    ActivityId id,
-    ActivityTrackerMemoryAllocator* allocator) {
-  // Don't allow user data for lock acquisition as recursion may occur.
-  if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
-    NOTREACHED();
-    return std::make_unique<ActivityUserData>();
-  }
-
-  // User-data is only stored for activities actually held in the stack.
-  if (id >= stack_slots_)
-    return std::make_unique<ActivityUserData>();
-
-  // Create and return a real UserData object.
-  return CreateUserDataForActivity(&stack_[id], allocator);
-}
-
-bool ThreadActivityTracker::HasUserData(ActivityId id) {
-  // User-data is only stored for activities actually held in the stack.
-  return (id < stack_slots_ && stack_[id].user_data_ref);
-}
-
-void ThreadActivityTracker::ReleaseUserData(
-    ActivityId id,
-    ActivityTrackerMemoryAllocator* allocator) {
-  // User-data is only stored for activities actually held in the stack.
-  if (id < stack_slots_ && stack_[id].user_data_ref) {
-    allocator->ReleaseObjectReference(stack_[id].user_data_ref);
-    stack_[id].user_data_ref = 0;
-  }
-}
-
-void ThreadActivityTracker::RecordExceptionActivity(const void* program_counter,
-                                                    const void* origin,
-                                                    Activity::Type type,
-                                                    const ActivityData& data) {
-  // A thread-checker creates a lock to check the thread-id, which would mean
-  // re-entering this code if lock acquisitions are being tracked.
-  DCHECK(CalledOnValidThread());
-
-  // Fill the reusable exception activity.
-  Activity::FillFrom(&header_->last_exception, program_counter, origin, type,
-                     data);
-
-  // The data has changed meaning that some other thread trying to copy the
-  // contents for reporting purposes could get bad data.
-  header_->data_version.fetch_add(1, std::memory_order_relaxed);
-}
-
-bool ThreadActivityTracker::IsValid() const {
-  if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
-      header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
-      header_->start_time == 0 || header_->start_ticks == 0 ||
-      header_->stack_slots != stack_slots_ ||
-      header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
-    return false;
-  }
-
-  return valid_;
-}
-
-bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
-  DCHECK(output_snapshot);
-
-  // There is no "called on valid thread" check for this method as it can be
-  // called from other threads or even other processes. It is also the reason
-  // why atomic operations must be used in certain places above.
-
-  // It's possible for the data to change while reading it in such a way that it
-  // invalidates the read. Make several attempts but don't try forever.
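-  // (This is an optimistic, seqlock-style read: copy everything, then
-  // re-check |data_version| and the owner identity before trusting it.)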
-  const int kMaxAttempts = 10;
-  uint32_t depth;
-
-  // Stop here if the data isn't valid.
-  if (!IsValid())
-    return false;
-
-  // Allocate the maximum size for the stack so it doesn't have to be done
-  // during the time-sensitive snapshot operation. It is shrunk once the
-  // actual size is known.
-  output_snapshot->activity_stack.reserve(stack_slots_);
-
-  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
-    // Remember the data IDs to ensure nothing is replaced during the snapshot
-    // operation. Use "acquire" so that all the non-atomic fields of the
-    // structure are valid (at least at the current moment in time).
-    const uint32_t starting_id =
-        header_->owner.data_id.load(std::memory_order_acquire);
-    const int64_t starting_create_stamp = header_->owner.create_stamp;
-    const int64_t starting_process_id = header_->owner.process_id;
-    const int64_t starting_thread_id = header_->thread_ref.as_id;
-
-    // Note the current |data_version| so it's possible to detect at the end
-    // that nothing has changed since copying the data began. A "cst" operation
-    // is required to ensure it occurs before everything else. Using "cst"
-    // memory ordering is relatively expensive but this is only done during
-    // analysis so doesn't directly affect the worker threads.
-    const uint32_t pre_version =
-        header_->data_version.load(std::memory_order_seq_cst);
-
-    // Fetching the current depth also "acquires" the contents of the stack.
-    depth = header_->current_depth.load(std::memory_order_acquire);
-    uint32_t count = std::min(depth, stack_slots_);
-    output_snapshot->activity_stack.resize(count);
-    if (count > 0) {
-      // Copy the existing contents. Memcpy is used for speed.
-      memcpy(&output_snapshot->activity_stack[0], stack_,
-             count * sizeof(Activity));
-    }
-
-    // Capture the last exception.
-    memcpy(&output_snapshot->last_exception, &header_->last_exception,
-           sizeof(Activity));
-
-    // TODO(bcwhite): Snapshot other things here.
-
-    // Retry if something changed during the copy. A "cst" operation ensures
-    // it must happen after all the above operations.
-    if (header_->data_version.load(std::memory_order_seq_cst) != pre_version)
-      continue;
-
-    // Stack copied. Record its full depth.
-    output_snapshot->activity_stack_depth = depth;
-
-    // Get the general thread information.
-    output_snapshot->thread_name =
-        std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
-    output_snapshot->create_stamp = header_->owner.create_stamp;
-    output_snapshot->thread_id = header_->thread_ref.as_id;
-    output_snapshot->process_id = header_->owner.process_id;
-
-    // All characters of the thread-name buffer were copied so as to not break
-    // if the trailing NUL were missing. Now limit the length if the actual
-    // name is shorter.
-    output_snapshot->thread_name.resize(
-        strlen(output_snapshot->thread_name.c_str()));
-
-    // If the data ID has changed then the tracker has exited and the memory
-    // reused by a new one. Try again.
-    if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
-        output_snapshot->create_stamp != starting_create_stamp ||
-        output_snapshot->process_id != starting_process_id ||
-        output_snapshot->thread_id != starting_thread_id) {
-      continue;
-    }
-
-    // Only successful if the data is still valid once everything is done since
-    // it's possible for the thread to end somewhere in the middle and all its
-    // values become garbage.
-    if (!IsValid())
-      return false;
-
-    // Change all the timestamps in the activities from "ticks" to "wall" time.
-    const Time start_time = Time::FromInternalValue(header_->start_time);
-    const int64_t start_ticks = header_->start_ticks;
-    for (Activity& activity : output_snapshot->activity_stack) {
-      activity.time_internal =
-          WallTimeFromTickTime(start_ticks, activity.time_internal, start_time)
-              .ToInternalValue();
-    }
-    output_snapshot->last_exception.time_internal =
-        WallTimeFromTickTime(start_ticks,
-                             output_snapshot->last_exception.time_internal,
-                             start_time)
-            .ToInternalValue();
-
-    // Success!
-    return true;
-  }
-
-  // Too many attempts.
-  return false;
-}
-
-const void* ThreadActivityTracker::GetBaseAddress() {
-  return header_;
-}
-
-uint32_t ThreadActivityTracker::GetDataVersionForTesting() {
-  return header_->data_version.load(std::memory_order_relaxed);
-}
-
-void ThreadActivityTracker::SetOwningProcessIdForTesting(int64_t pid,
-                                                         int64_t stamp) {
-  header_->owner.SetOwningProcessIdForTesting(pid, stamp);
-}
-
-// static
-bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
-                                               int64_t* out_id,
-                                               int64_t* out_stamp) {
-  const Header* header = reinterpret_cast<const Header*>(memory);
-  return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
-}
-
-// static
-size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
-  return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
-}
-
-bool ThreadActivityTracker::CalledOnValidThread() {
-#if DCHECK_IS_ON()
-  return thread_id_ == PlatformThreadRef();
-#else
-  return true;
-#endif
-}
-
-std::unique_ptr<ActivityUserData>
-ThreadActivityTracker::CreateUserDataForActivity(
-    Activity* activity,
-    ActivityTrackerMemoryAllocator* allocator) {
-  DCHECK_EQ(0U, activity->user_data_ref);
-
-  PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
-  void* memory = allocator->GetAsArray<char>(ref, kUserDataSize);
-  if (memory) {
-    std::unique_ptr<ActivityUserData> user_data =
-        std::make_unique<ActivityUserData>(memory, kUserDataSize);
-    activity->user_data_ref = ref;
-    activity->user_data_id = user_data->id();
-    return user_data;
-  }
-
-  // Return a dummy object that will still accept (but ignore) Set() calls.
-  return std::make_unique<ActivityUserData>();
-}
-
-// The instantiation of the GlobalActivityTracker object.
-// The object held here will obviously not be destructed at process exit
-// but that's best since PersistentMemoryAllocator objects (that underlie
-// GlobalActivityTracker objects) are explicitly forbidden from doing anything
-// essential at exit anyway because they depend on data managed elsewhere
-// that could be destructed first. An AtomicWord is used instead of
-// std::atomic because the latter can create global ctors and dtors.
-subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0;
-
-GlobalActivityTracker::ModuleInfo::ModuleInfo() = default;
-GlobalActivityTracker::ModuleInfo::ModuleInfo(ModuleInfo&& rhs) = default;
-GlobalActivityTracker::ModuleInfo::ModuleInfo(const ModuleInfo& rhs) = default;
-GlobalActivityTracker::ModuleInfo::~ModuleInfo() = default;
-
-GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
-    ModuleInfo&& rhs) = default;
-GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
-    const ModuleInfo& rhs) = default;
-
-GlobalActivityTracker::ModuleInfoRecord::ModuleInfoRecord() = default;
-GlobalActivityTracker::ModuleInfoRecord::~ModuleInfoRecord() = default;
-
-bool GlobalActivityTracker::ModuleInfoRecord::DecodeTo(
-    GlobalActivityTracker::ModuleInfo* info,
-    size_t record_size) const {
-  // Get the current "changes" indicator, acquiring all the other values.
-  uint32_t current_changes = changes.load(std::memory_order_acquire);
-
-  // Copy out the dynamic information.
-  info->is_loaded = loaded != 0;
-  info->address = static_cast<uintptr_t>(address);
-  info->load_time = load_time;
-
-  // Check to make sure no information changed while being read. A "seq-cst"
-  // operation is expensive but is only done during analysis and it's the only
-  // way to ensure this occurs after all the accesses above. If changes did
-  // occur then return a "not loaded" result so that |size| and |address|
-  // aren't expected to be accurate.
-  if ((current_changes & kModuleInformationChanging) != 0 ||
-      changes.load(std::memory_order_seq_cst) != current_changes) {
-    info->is_loaded = false;
-  }
-
-  // Copy out the static information. These never change so don't have to be
-  // protected by the atomic |current_changes| operations.
-  info->size = static_cast<size_t>(size);
-  info->timestamp = timestamp;
-  info->age = age;
-  memcpy(info->identifier, identifier, sizeof(info->identifier));
-
-  if (offsetof(ModuleInfoRecord, pickle) + pickle_size > record_size)
-    return false;
-  Pickle pickler(pickle, pickle_size);
-  PickleIterator iter(pickler);
-  return iter.ReadString(&info->file) && iter.ReadString(&info->debug_file);
-}
-
-GlobalActivityTracker::ModuleInfoRecord*
-GlobalActivityTracker::ModuleInfoRecord::CreateFrom(
-    const GlobalActivityTracker::ModuleInfo& info,
-    PersistentMemoryAllocator* allocator) {
-  Pickle pickler;
-  pickler.WriteString(info.file);
-  pickler.WriteString(info.debug_file);
-  size_t required_size = offsetof(ModuleInfoRecord, pickle) + pickler.size();
-  ModuleInfoRecord* record = allocator->New<ModuleInfoRecord>(required_size);
-  if (!record)
-    return nullptr;
-
-  // These fields never change and are set before the record is made
-  // iterable, so no thread protection is necessary.
-  record->size = info.size;
-  record->timestamp = info.timestamp;
-  record->age = info.age;
-  memcpy(record->identifier, info.identifier, sizeof(identifier));
-  memcpy(record->pickle, pickler.data(), pickler.size());
-  record->pickle_size = pickler.size();
-  record->changes.store(0, std::memory_order_relaxed);
-
-  // Initialize the owner info.
-  record->owner.Release_Initialize();
-
-  // Now set those fields that can change.
-  bool success = record->UpdateFrom(info);
-  DCHECK(success);
-  return record;
-}
-
-bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom(
-    const GlobalActivityTracker::ModuleInfo& info) {
-  // Updates can occur after the record is made visible so make changes atomic.
-  // A "strong" exchange ensures no false failures.
-  uint32_t old_changes = changes.load(std::memory_order_relaxed);
-  uint32_t new_changes = old_changes | kModuleInformationChanging;
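-  // |changes| acts as a sequence counter: kModuleInformationChanging (the
-  // top bit) marks an update in progress, and the exchange at the end of
-  // this method stores |old_changes| + 1 so that DecodeTo() can detect a
-  // concurrent update by re-reading the counter.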
-  if ((old_changes & kModuleInformationChanging) != 0 ||
-      !changes.compare_exchange_strong(old_changes, new_changes,
-                                       std::memory_order_acquire,
-                                       std::memory_order_acquire)) {
-    NOTREACHED() << "Multiple sources are updating module information.";
-    return false;
-  }
-
-  loaded = info.is_loaded ? 1 : 0;
-  address = info.address;
-  load_time = Time::Now().ToInternalValue();
-
-  bool success = changes.compare_exchange_strong(new_changes, old_changes + 1,
-                                                 std::memory_order_release,
-                                                 std::memory_order_relaxed);
-  DCHECK(success);
-  return true;
-}
-
-GlobalActivityTracker::ScopedThreadActivity::ScopedThreadActivity(
-    const void* program_counter,
-    const void* origin,
-    Activity::Type type,
-    const ActivityData& data,
-    bool lock_allowed)
-    : ThreadActivityTracker::ScopedActivity(GetOrCreateTracker(lock_allowed),
-                                            program_counter,
-                                            origin,
-                                            type,
-                                            data) {}
-
-GlobalActivityTracker::ScopedThreadActivity::~ScopedThreadActivity() {
-  if (tracker_ && tracker_->HasUserData(activity_id_)) {
-    GlobalActivityTracker* global = GlobalActivityTracker::Get();
-    AutoLock lock(global->user_data_allocator_lock_);
-    tracker_->ReleaseUserData(activity_id_, &global->user_data_allocator_);
-  }
-}
-
-ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
-  if (!user_data_) {
-    if (tracker_) {
-      GlobalActivityTracker* global = GlobalActivityTracker::Get();
-      AutoLock lock(global->user_data_allocator_lock_);
-      user_data_ =
-          tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
-    } else {
-      user_data_ = std::make_unique<ActivityUserData>();
-    }
-  }
-  return *user_data_;
-}
-
-GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
-                                                              size_t size,
-                                                              int64_t pid)
-    : ActivityUserData(memory, size, pid) {}
-
-GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() = default;
-
-void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
-                                                    ValueType type,
-                                                    const void* memory,
-                                                    size_t size) {
-  AutoLock lock(data_lock_);
-  ActivityUserData::Set(name, type, memory, size);
-}
-
-GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
-    PersistentMemoryAllocator::Reference mem_reference,
-    void* base,
-    size_t size)
-    : ThreadActivityTracker(base, size),
-      mem_reference_(mem_reference),
-      mem_base_(base) {}
-
-GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
-  // The global |g_tracker_| must point to the owner of this class since all
-  // objects of this type must be destructed before |g_tracker_| can be changed
-  // (something that only occurs in tests).
-  DCHECK(g_tracker_);
-  GlobalActivityTracker::Get()->ReturnTrackerMemory(this);
-}
-
-void GlobalActivityTracker::CreateWithAllocator(
-    std::unique_ptr<PersistentMemoryAllocator> allocator,
-    int stack_depth,
-    int64_t process_id) {
-  // There's no need to do anything with the result. It is self-managing.
-  GlobalActivityTracker* global_tracker =
-      new GlobalActivityTracker(std::move(allocator), stack_depth, process_id);
-  // Create a tracker for this thread since it is known.
-  global_tracker->CreateTrackerForCurrentThread();
-}
-
-#if !defined(OS_NACL)
-// static
-bool GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
-                                           size_t size,
-                                           uint64_t id,
-                                           StringPiece name,
-                                           int stack_depth) {
-  DCHECK(!file_path.empty());
-  DCHECK_GE(static_cast<uint64_t>(std::numeric_limits<int64_t>::max()), size);
-
-  // Create and map the file into memory and make it globally available.
-  std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
-  bool success = mapped_file->Initialize(
-      File(file_path, File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
-                          File::FLAG_WRITE | File::FLAG_SHARE_DELETE),
-      {0, size}, MemoryMappedFile::READ_WRITE_EXTEND);
-  if (!success)
-    return false;
-  if (!FilePersistentMemoryAllocator::IsFileAcceptable(*mapped_file, false))
-    return false;
-  CreateWithAllocator(std::make_unique<FilePersistentMemoryAllocator>(
-                          std::move(mapped_file), size, id, name, false),
-                      stack_depth, 0);
-  return true;
-}
-#endif  // !defined(OS_NACL)
-
-// static
-bool GlobalActivityTracker::CreateWithLocalMemory(size_t size,
-                                                  uint64_t id,
-                                                  StringPiece name,
-                                                  int stack_depth,
-                                                  int64_t process_id) {
-  CreateWithAllocator(
-      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name),
-      stack_depth, process_id);
-  return true;
-}
-
-// static
-bool GlobalActivityTracker::CreateWithSharedMemory(
-    std::unique_ptr<SharedMemory> shm,
-    uint64_t id,
-    StringPiece name,
-    int stack_depth) {
-  if (shm->mapped_size() == 0 ||
-      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
-    return false;
-  }
-  CreateWithAllocator(std::make_unique<SharedPersistentMemoryAllocator>(
-                          std::move(shm), id, name, false),
-                      stack_depth, 0);
-  return true;
-}
-
-// static
-bool GlobalActivityTracker::CreateWithSharedMemoryHandle(
-    const SharedMemoryHandle& handle,
-    size_t size,
-    uint64_t id,
-    StringPiece name,
-    int stack_depth) {
-  std::unique_ptr<SharedMemory> shm(
-      new SharedMemory(handle, /*readonly=*/false));
-  if (!shm->Map(size))
-    return false;
-  return CreateWithSharedMemory(std::move(shm), id, name, stack_depth);
-}
-
-// static
-void GlobalActivityTracker::SetForTesting(
-    std::unique_ptr<GlobalActivityTracker> tracker) {
-  CHECK(!subtle::NoBarrier_Load(&g_tracker_));
-  subtle::Release_Store(&g_tracker_,
-                        reinterpret_cast<uintptr_t>(tracker.release()));
-}
-
-// static
-std::unique_ptr<GlobalActivityTracker>
-GlobalActivityTracker::ReleaseForTesting() {
-  GlobalActivityTracker* tracker = Get();
-  if (!tracker)
-    return nullptr;
-
-  // Thread trackers assume that the global tracker is present for some
-  // operations so ensure that there aren't any.
-  tracker->ReleaseTrackerForCurrentThreadForTesting();
-  DCHECK_EQ(0, tracker->thread_tracker_count_.load(std::memory_order_relaxed));
-
-  subtle::Release_Store(&g_tracker_, 0);
-  return WrapUnique(tracker);
-}
-
-ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
-  DCHECK(!this_thread_tracker_.Get());
-
-  PersistentMemoryAllocator::Reference mem_reference;
-
-  {
-    base::AutoLock autolock(thread_tracker_allocator_lock_);
-    mem_reference = thread_tracker_allocator_.GetObjectReference();
-  }
-
-  if (!mem_reference) {
-    // Failure. This shouldn't happen. But be graceful if it does, probably
-    // because the underlying allocator wasn't given enough memory to
-    // satisfy all possible requests.
-    NOTREACHED();
-    // Report the thread-count at which the allocator was full so that the
-    // failure can be seen and underlying memory resized appropriately.
-    UMA_HISTOGRAM_COUNTS_1000(
-        "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
-        thread_tracker_count_.load(std::memory_order_relaxed));
-    // Return null, just as if tracking wasn't enabled.
-    return nullptr;
-  }
-
-  // Convert the memory block found above into an actual memory address.
-  // Doing the conversion as a Header object enacts the 32/64-bit size
-  // consistency checks which would not otherwise be done. Unfortunately,
-  // some older compilers and MSVC don't have standard-conforming definitions
-  // of std::atomic which cause it not to be plain-old-data. Don't check on
-  // those platforms assuming that the checks on other platforms will be
-  // sufficient.
-  // TODO(bcwhite): Review this after major compiler releases.
-  DCHECK(mem_reference);
-  void* mem_base =
-      allocator_->GetAsObject<ThreadActivityTracker::Header>(mem_reference);
-
-  DCHECK(mem_base);
-  DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
-
-  // Create a tracker with the acquired memory and set it as the tracker
-  // for this particular thread in thread-local-storage.
-  ManagedActivityTracker* tracker =
-      new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
-  DCHECK(tracker->IsValid());
-  this_thread_tracker_.Set(tracker);
-  int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
-
-  UMA_HISTOGRAM_EXACT_LINEAR("ActivityTracker.ThreadTrackers.Count",
-                             old_count + 1, static_cast<int>(kMaxThreadCount));
-  return tracker;
-}
-
-void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
-  ThreadActivityTracker* tracker =
-      reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
-  if (tracker) {
-    this_thread_tracker_.Set(nullptr);
-    delete tracker;
-  }
-}
-
-void GlobalActivityTracker::SetBackgroundTaskRunner(
-    const scoped_refptr<TaskRunner>& runner) {
-  AutoLock lock(global_tracker_lock_);
-  background_task_runner_ = runner;
-}
-
-void GlobalActivityTracker::SetProcessExitCallback(
-    ProcessExitCallback callback) {
-  AutoLock lock(global_tracker_lock_);
-  process_exit_callback_ = callback;
-}
-
-void GlobalActivityTracker::RecordProcessLaunch(
-    ProcessId process_id,
-    const FilePath::StringType& cmd) {
-  const int64_t pid = process_id;
-  DCHECK_NE(GetProcessId(), pid);
-  DCHECK_NE(0, pid);
-
-  base::AutoLock lock(global_tracker_lock_);
-  if (base::ContainsKey(known_processes_, pid)) {
-    // TODO(bcwhite): Measure this in UMA.
-    NOTREACHED() << "Process #" << process_id
-                 << " was previously recorded as \"launched\""
-                 << " with no corresponding exit.\n"
-                 << known_processes_[pid];
-    known_processes_.erase(pid);
-  }
-
-#if defined(OS_WIN)
-  known_processes_.insert(std::make_pair(pid, UTF16ToUTF8(cmd)));
-#else
-  known_processes_.insert(std::make_pair(pid, cmd));
-#endif
-}
-
-void GlobalActivityTracker::RecordProcessLaunch(
-    ProcessId process_id,
-    const FilePath::StringType& exe,
-    const FilePath::StringType& args) {
-  // Quote the executable if its path contains a space so that the recorded
-  // command line can later be split back into its parts unambiguously.
-  if (exe.find(FILE_PATH_LITERAL(" ")) != FilePath::StringType::npos) {
-    RecordProcessLaunch(process_id,
-                        FilePath::StringType(FILE_PATH_LITERAL("\"")) + exe +
-                            FILE_PATH_LITERAL("\" ") + args);
-  } else {
-    RecordProcessLaunch(process_id, exe + FILE_PATH_LITERAL(' ') + args);
-  }
-}
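-
-// For illustration (a sketch, not in the original source): with the quoting
-// above, an executable path containing a space is recorded in a form that
-// can be split unambiguously:
-//
-//   RecordProcessLaunch(pid, FILE_PATH_LITERAL("C:\\Program Files\\app.exe"),
-//                       FILE_PATH_LITERAL("--type=renderer"));
-//   // Recorded command line: "C:\Program Files\app.exe" --type=renderer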
-
-void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
-                                              int exit_code) {
-  const int64_t pid = process_id;
-  DCHECK_NE(GetProcessId(), pid);
-  DCHECK_NE(0, pid);
-
-  scoped_refptr<TaskRunner> task_runner;
-  std::string command_line;
-  {
-    base::AutoLock lock(global_tracker_lock_);
-    task_runner = background_task_runner_;
-    auto found = known_processes_.find(pid);
-    if (found != known_processes_.end()) {
-      command_line = std::move(found->second);
-      known_processes_.erase(found);
-    } else {
-      DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
-    }
-  }
-
-  // Use the current time to differentiate the process that just exited
-  // from any that might be created in the future with the same ID.
-  int64_t now_stamp = Time::Now().ToInternalValue();
-
-  // The persistent allocator is thread-safe so run the iteration and
-  // adjustments on a worker thread if one was provided.
-  if (task_runner && !task_runner->RunsTasksInCurrentSequence()) {
-    task_runner->PostTask(
-        FROM_HERE,
-        BindOnce(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this),
-                 pid, now_stamp, exit_code, std::move(command_line)));
-    return;
-  }
-
-  CleanupAfterProcess(pid, now_stamp, exit_code, std::move(command_line));
-}
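-
-// Illustrative wiring (a sketch; |runner| and |callback| are assumed to be
-// created elsewhere): when a background task-runner has been provided, the
-// cleanup and exit-analysis above run off the calling thread:
-//
-//   GlobalActivityTracker* global = GlobalActivityTracker::Get();
-//   global->SetBackgroundTaskRunner(runner);
-//   global->SetProcessExitCallback(callback);
-//   ...
-//   global->RecordProcessExit(child_pid, exit_code);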
-
-void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
-  process_data().SetInt(kProcessPhaseDataKey, phase);
-}
-
-void GlobalActivityTracker::CleanupAfterProcess(int64_t process_id,
-                                                int64_t exit_stamp,
-                                                int exit_code,
-                                                std::string&& command_line) {
-  // The process may not have exited cleanly, so it's necessary to go through
-  // all the data structures it may have allocated in the persistent memory
-  // segment and mark them as "released". This will allow them to be reused
-  // later on.
-
-  PersistentMemoryAllocator::Iterator iter(allocator_.get());
-  PersistentMemoryAllocator::Reference ref;
-
-  ProcessExitCallback process_exit_callback;
-  {
-    AutoLock lock(global_tracker_lock_);
-    process_exit_callback = process_exit_callback_;
-  }
-  if (process_exit_callback) {
-    // Find the process's user-data record so the process phase can be passed
-    // to the callback.
-    ActivityUserData::Snapshot process_data_snapshot;
-    while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
-      const void* memory = allocator_->GetAsArray<char>(
-          ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
-      if (!memory)
-        continue;
-      int64_t found_id;
-      int64_t create_stamp;
-      if (ActivityUserData::GetOwningProcessId(memory, &found_id,
-                                               &create_stamp)) {
-        if (found_id == process_id && create_stamp < exit_stamp) {
-          const ActivityUserData process_data(const_cast<void*>(memory),
-                                              allocator_->GetAllocSize(ref));
-          process_data.CreateSnapshot(&process_data_snapshot);
-          break;  // No need to look for any others.
-        }
-      }
-    }
-    iter.Reset();  // So it starts anew when used below.
-
-    // Record the process's phase at exit so the callback doesn't need to go
-    // searching based on a private key value.
-    ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
-    auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
-    if (phase != process_data_snapshot.end())
-      exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());
-
-    // Perform the callback.
-    process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
-                              std::move(command_line),
-                              std::move(process_data_snapshot));
-  }
-
-  // Find all allocations associated with the exited process and free them.
-  uint32_t type;
-  while ((ref = iter.GetNext(&type)) != 0) {
-    switch (type) {
-      case kTypeIdActivityTracker:
-      case kTypeIdUserDataRecord:
-      case kTypeIdProcessDataRecord:
-      case ModuleInfoRecord::kPersistentTypeId: {
-        const void* memory = allocator_->GetAsArray<char>(
-            ref, type, PersistentMemoryAllocator::kSizeAny);
-        if (!memory)
-          continue;
-        int64_t found_id;
-        int64_t create_stamp;
-
-        // By convention, the OwningProcess structure is always the first
-        // field of the structure so there's no need to handle all the
-        // cases separately.
-        if (OwningProcess::GetOwningProcessId(memory, &found_id,
-                                              &create_stamp)) {
-          // Only change the type to be "free" if the process ID matches and
-          // the creation time is before the exit time (so PID re-use doesn't
-          // cause the erasure of something that is in-use). Memory is cleared
-          // here, rather than when it's needed, so as to limit the impact at
-          // that critical time.
-          if (found_id == process_id && create_stamp < exit_stamp)
-            allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
-        }
-      } break;
-    }
-  }
-}
-
-void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
-  // Allocate at least one extra byte so the string is NUL terminated. All
-  // memory returned by the allocator is guaranteed to be zeroed.
-  PersistentMemoryAllocator::Reference ref =
-      allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
-  char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
-                                              message.size() + 1);
-  if (memory) {
-    memcpy(memory, message.data(), message.size());
-    allocator_->MakeIterable(ref);
-  }
-}
-
-void GlobalActivityTracker::RecordModuleInfo(const ModuleInfo& info) {
-  AutoLock lock(modules_lock_);
-  auto found = modules_.find(info.file);
-  if (found != modules_.end()) {
-    ModuleInfoRecord* record = found->second;
-    DCHECK(record);
-
-    // Update the basic state of module information that has already been
-    // recorded. It is assumed that the string information (identifier,
-    // version, etc.) remains unchanged, which means that there's no need
-    // to create a new record to accommodate a possibly longer length.
-    record->UpdateFrom(info);
-    return;
-  }
-
-  ModuleInfoRecord* record =
-      ModuleInfoRecord::CreateFrom(info, allocator_.get());
-  if (!record)
-    return;
-  allocator_->MakeIterable(record);
-  modules_.emplace(info.file, record);
-}
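-
-// Illustrative call (a sketch; the field values are hypothetical): repeated
-// calls with the same |file| update the existing record in place rather than
-// allocating a new one:
-//
-//   GlobalActivityTracker::ModuleInfo info;
-//   info.is_loaded = true;
-//   info.address = 0x7f0000000000;  // Hypothetical load address.
-//   info.file = "/usr/lib/libfoo.so";
-//   GlobalActivityTracker::RecordModuleInfoIfEnabled(info);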
-
-void GlobalActivityTracker::RecordFieldTrial(const std::string& trial_name,
-                                             StringPiece group_name) {
-  const std::string key = std::string("FieldTrial.") + trial_name;
-  process_data_.SetString(key, group_name);
-}
-
-void GlobalActivityTracker::RecordException(const void* pc,
-                                            const void* origin,
-                                            uint32_t code) {
-  RecordExceptionImpl(pc, origin, code);
-}
-
-void GlobalActivityTracker::MarkDeleted() {
-  allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
-}
-
-GlobalActivityTracker::GlobalActivityTracker(
-    std::unique_ptr<PersistentMemoryAllocator> allocator,
-    int stack_depth,
-    int64_t process_id)
-    : allocator_(std::move(allocator)),
-      stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
-      process_id_(process_id == 0 ? GetCurrentProcId() : process_id),
-      this_thread_tracker_(&OnTLSDestroy),
-      thread_tracker_count_(0),
-      thread_tracker_allocator_(allocator_.get(),
-                                kTypeIdActivityTracker,
-                                kTypeIdActivityTrackerFree,
-                                stack_memory_size_,
-                                kCachedThreadMemories,
-                                /*make_iterable=*/true),
-      user_data_allocator_(allocator_.get(),
-                           kTypeIdUserDataRecord,
-                           kTypeIdUserDataRecordFree,
-                           kUserDataSize,
-                           kCachedUserDataMemories,
-                           /*make_iterable=*/true),
-      process_data_(allocator_->GetAsArray<char>(
-                        AllocateFrom(allocator_.get(),
-                                     kTypeIdProcessDataRecordFree,
-                                     kProcessDataSize,
-                                     kTypeIdProcessDataRecord),
-                        kTypeIdProcessDataRecord,
-                        kProcessDataSize),
-                    kProcessDataSize,
-                    process_id_) {
-  DCHECK_NE(0, process_id_);
-
-  // Ensure that there is no other global object and then make this one such.
-  DCHECK(!g_tracker_);
-  subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
-
-  // The data records must be iterable in order to be found by an analyzer.
-  allocator_->MakeIterable(allocator_->GetAsReference(
-      process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
-
-  // Note that this process has launched.
-  SetProcessPhase(PROCESS_LAUNCHED);
-
-  // Fetch and record all activated field trials.
-  FieldTrial::ActiveGroups active_groups;
-  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
-  for (auto& group : active_groups)
-    RecordFieldTrial(group.trial_name, group.group_name);
-}
-
-GlobalActivityTracker::~GlobalActivityTracker() {
-  DCHECK(Get() == nullptr || Get() == this);
-  DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
-  subtle::Release_Store(&g_tracker_, 0);
-}
-
-void GlobalActivityTracker::ReturnTrackerMemory(
-    ManagedActivityTracker* tracker) {
-  PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
-  void* mem_base = tracker->mem_base_;
-  DCHECK(mem_reference);
-  DCHECK(mem_base);
-
-  // Remove the destructed tracker from the set of known ones.
-  DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
-  thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
-
-  // Release this memory for re-use at a later time.
-  base::AutoLock autolock(thread_tracker_allocator_lock_);
-  thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
-}
-
-void GlobalActivityTracker::RecordExceptionImpl(const void* pc,
-                                                const void* origin,
-                                                uint32_t code) {
-  // Get an existing tracker for this thread. It's not possible to create
-  // one at this point because such would involve memory allocations and
-  // other potentially complex operations that can cause failures if done
-  // within an exception handler. In most cases various operations will
-  // have already created the tracker so this shouldn't generally be a
-  // problem.
-  ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
-  if (!tracker)
-    return;
-
-  tracker->RecordExceptionActivity(pc, origin, Activity::ACT_EXCEPTION,
-                                   ActivityData::ForException(code));
-}
-
-// static
-void GlobalActivityTracker::OnTLSDestroy(void* value) {
-  delete reinterpret_cast<ManagedActivityTracker*>(value);
-}
-
-ScopedActivity::ScopedActivity(const void* program_counter,
-                               uint8_t action,
-                               uint32_t id,
-                               int32_t info)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          nullptr,
-          static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
-          ActivityData::ForGeneric(id, info),
-          /*lock_allowed=*/true),
-      id_(id) {
-  // The action must not affect the category bits of the activity type.
-  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
-}
-
-void ScopedActivity::ChangeAction(uint8_t action) {
-  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
-  ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
-                    kNullActivityData);
-}
-
-void ScopedActivity::ChangeInfo(int32_t info) {
-  ChangeTypeAndData(Activity::ACT_NULL, ActivityData::ForGeneric(id_, info));
-}
-
-void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) {
-  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
-  ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
-                    ActivityData::ForGeneric(id_, info));
-}
-
-ScopedTaskRunActivity::ScopedTaskRunActivity(
-    const void* program_counter,
-    const base::PendingTask& task)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          task.posted_from.program_counter(),
-          Activity::ACT_TASK_RUN,
-          ActivityData::ForTask(task.sequence_num),
-          /*lock_allowed=*/true) {}
-
-ScopedLockAcquireActivity::ScopedLockAcquireActivity(
-    const void* program_counter,
-    const base::internal::LockImpl* lock)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          nullptr,
-          Activity::ACT_LOCK_ACQUIRE,
-          ActivityData::ForLock(lock),
-          /*lock_allowed=*/false) {}
-
-ScopedEventWaitActivity::ScopedEventWaitActivity(
-    const void* program_counter,
-    const base::WaitableEvent* event)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          nullptr,
-          Activity::ACT_EVENT_WAIT,
-          ActivityData::ForEvent(event),
-          /*lock_allowed=*/true) {}
-
-ScopedThreadJoinActivity::ScopedThreadJoinActivity(
-    const void* program_counter,
-    const base::PlatformThreadHandle* thread)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          nullptr,
-          Activity::ACT_THREAD_JOIN,
-          ActivityData::ForThread(*thread),
-          /*lock_allowed=*/true) {}
-
-#if !defined(OS_NACL) && !defined(OS_IOS)
-ScopedProcessWaitActivity::ScopedProcessWaitActivity(
-    const void* program_counter,
-    const base::Process* process)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          nullptr,
-          Activity::ACT_PROCESS_WAIT,
-          ActivityData::ForProcess(process->Pid()),
-          /*lock_allowed=*/true) {}
-#endif
-
-}  // namespace debug
-}  // namespace base
diff --git a/base/debug/activity_tracker.h b/base/debug/activity_tracker.h
deleted file mode 100644
index bfd9f9d..0000000
--- a/base/debug/activity_tracker.h
+++ /dev/null
@@ -1,1360 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Activity tracking provides a low-overhead method of collecting information
-// about the state of the application for analysis both while it is running
-// and after it has terminated unexpectedly. Its primary purpose is to help
-// locate reasons the browser becomes unresponsive by providing insight into
-// what all the various threads and processes are (or were) doing.
-
-#ifndef BASE_DEBUG_ACTIVITY_TRACKER_H_
-#define BASE_DEBUG_ACTIVITY_TRACKER_H_
-
-// std::atomic is undesired due to performance issues when used as global
-// variables. There are no such instances here. This module uses the
-// PersistentMemoryAllocator which also uses std::atomic and is written
-// by the same author.
-#include <atomic>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "base/atomicops.h"
-#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/location.h"
-#include "base/memory/shared_memory.h"
-#include "base/metrics/persistent_memory_allocator.h"
-#include "base/process/process_handle.h"
-#include "base/strings/string_piece.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/task_runner.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread_local_storage.h"
-
-namespace base {
-
-struct PendingTask;
-
-class FilePath;
-class Lock;
-class PlatformThreadHandle;
-class Process;
-class WaitableEvent;
-
-namespace debug {
-
-class ThreadActivityTracker;
-
-
-enum : int {
-  // The maximum number of call-stack addresses stored per activity. This
-  // cannot be changed without also changing the version number of the
-  // structure. See kTypeIdActivityTracker in GlobalActivityTracker.
-  kActivityCallStackSize = 10,
-};
-
-// A class for keeping all information needed to verify that a structure is
-// associated with a given process.
-struct OwningProcess {
-  OwningProcess();
-  ~OwningProcess();
-
-  // Initializes structure with the current process id and the current time.
-  // These can uniquely identify a process. A unique non-zero data_id will be
-  // set, making it possible to tell using atomic reads if the data has changed.
-  void Release_Initialize(int64_t pid = 0);
-
-  // Explicitly sets the process ID.
-  void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
-
-  // Gets the associated process ID, in native form, and the creation timestamp
-  // from memory without loading the entire structure for analysis. This will
-  // return false if no valid process ID is available.
-  static bool GetOwningProcessId(const void* memory,
-                                 int64_t* out_id,
-                                 int64_t* out_stamp);
-
-  // SHA1(base::debug::OwningProcess): Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId = 0xB1179672 + 1;
-
-  // Expected size for 32/64-bit check by PersistentMemoryAllocator.
-  static constexpr size_t kExpectedInstanceSize = 24;
-
-  std::atomic<uint32_t> data_id;
-  uint32_t padding;
-  int64_t process_id;
-  int64_t create_stamp;
-};
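-
-// Example (a sketch, not part of the original header): an analyzer holding a
-// raw pointer to one of these structures can identify its owner without
-// copying the whole block:
-//
-//   int64_t pid;
-//   int64_t stamp;
-//   if (OwningProcess::GetOwningProcessId(memory, &pid, &stamp)) {
-//     // |memory| was written by process |pid| at time |stamp|.
-//   }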
-
-// The data associated with an activity is dependent upon the activity type.
-// This union defines all of the various fields. All fields must be explicitly
-// sized types to ensure no interoperability problems between 32-bit and
-// 64-bit systems.
-union ActivityData {
-  // Expected size for 32/64-bit check.
-  // TODO(bcwhite): VC2015 doesn't allow statics in unions. Fix when it does.
-  // static constexpr size_t kExpectedInstanceSize = 8;
-
-  // Generic activities don't have any defined structure.
-  struct {
-    uint32_t id;   // An arbitrary identifier used for association.
-    int32_t info;  // An arbitrary value used for information purposes.
-  } generic;
-  struct {
-    uint64_t sequence_id;  // The sequence identifier of the posted task.
-  } task;
-  struct {
-    uint64_t lock_address;  // The memory address of the lock object.
-  } lock;
-  struct {
-    uint64_t event_address;  // The memory address of the event object.
-  } event;
-  struct {
-    int64_t thread_id;  // A unique identifier for a thread within a process.
-  } thread;
-  struct {
-    int64_t process_id;  // A unique identifier for a process.
-  } process;
-  struct {
-    uint32_t code;  // An "exception code" number.
-  } exception;
-
-  // These methods create an ActivityData object from the appropriate
-  // parameters. Objects of this type should always be created this way to
-  // ensure that no fields remain unpopulated should the set of recorded
-  // fields change. They're defined inline where practical because they
-  // reduce to loading a small local structure with a few values, roughly
-  // the same as loading all those values into parameters.
-
-  static ActivityData ForGeneric(uint32_t id, int32_t info) {
-    ActivityData data;
-    data.generic.id = id;
-    data.generic.info = info;
-    return data;
-  }
-
-  static ActivityData ForTask(uint64_t sequence) {
-    ActivityData data;
-    data.task.sequence_id = sequence;
-    return data;
-  }
-
-  static ActivityData ForLock(const void* lock) {
-    ActivityData data;
-    data.lock.lock_address = reinterpret_cast<uintptr_t>(lock);
-    return data;
-  }
-
-  static ActivityData ForEvent(const void* event) {
-    ActivityData data;
-    data.event.event_address = reinterpret_cast<uintptr_t>(event);
-    return data;
-  }
-
-  static ActivityData ForThread(const PlatformThreadHandle& handle);
-  static ActivityData ForThread(const int64_t id) {
-    ActivityData data;
-    data.thread.thread_id = id;
-    return data;
-  }
-
-  static ActivityData ForProcess(const int64_t id) {
-    ActivityData data;
-    data.process.process_id = id;
-    return data;
-  }
-
-  static ActivityData ForException(const uint32_t code) {
-    ActivityData data;
-    data.exception.code = code;
-    return data;
-  }
-};
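-
-// Example (a sketch): objects are always built through the factory methods
-// above so that every field is populated deterministically:
-//
-//   ActivityData data = ActivityData::ForGeneric(/*id=*/42, /*info=*/-1);
-//   // data.generic.id == 42, data.generic.info == -1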
-
-// A "null" activity-data that can be passed to indicate "do not change".
-extern const ActivityData kNullActivityData;
-
-
-// A helper class that is used for managing memory allocations within a
-// persistent memory allocator. Instances of this class are NOT thread-safe.
-// Use from a single thread or protect access with a lock.
-class BASE_EXPORT ActivityTrackerMemoryAllocator {
- public:
-  using Reference = PersistentMemoryAllocator::Reference;
-
-  // Creates an instance for allocating objects of a fixed |object_type|, a
-  // corresponding |object_free_type|, and the given |object_size|. An internal
-  // cache of the last |cache_size| released references will be kept for
-  // quick future fetches. If |make_iterable| then allocated objects will
-  // be marked "iterable" in the allocator.
-  ActivityTrackerMemoryAllocator(PersistentMemoryAllocator* allocator,
-                                 uint32_t object_type,
-                                 uint32_t object_free_type,
-                                 size_t object_size,
-                                 size_t cache_size,
-                                 bool make_iterable);
-  ~ActivityTrackerMemoryAllocator();
-
-  // Gets a reference to an object of the configured type. This can return
-  // a null reference if it was not possible to allocate the memory.
-  Reference GetObjectReference();
-
-  // Returns an object to the "free" pool.
-  void ReleaseObjectReference(Reference ref);
-
-  // Helper function to access an object allocated using this instance.
-  template <typename T>
-  T* GetAsObject(Reference ref) {
-    return allocator_->GetAsObject<T>(ref);
-  }
-
-  // Similar to GetAsObject() but converts references to arrays of objects.
-  template <typename T>
-  T* GetAsArray(Reference ref, size_t count) {
-    return allocator_->GetAsArray<T>(ref, object_type_, count);
-  }
-
-  // The current "used size" of the internal cache, visible for testing.
-  size_t cache_used() const { return cache_used_; }
-
- private:
-  PersistentMemoryAllocator* const allocator_;
-  const uint32_t object_type_;
-  const uint32_t object_free_type_;
-  const size_t object_size_;
-  const size_t cache_size_;
-  const bool make_iterable_;
-
-  // An iterator for going through persistent memory looking for free'd objects.
-  PersistentMemoryAllocator::Iterator iterator_;
-
-  // The cache of released object memories.
-  std::unique_ptr<Reference[]> cache_values_;
-  size_t cache_used_;
-
-  DISALLOW_COPY_AND_ASSIGN(ActivityTrackerMemoryAllocator);
-};
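-
-// Example (a sketch; the type constants are hypothetical): a fixed-type
-// allocator with a small cache of released references, mirroring how the
-// global tracker allocates per-thread stack memory:
-//
-//   ActivityTrackerMemoryAllocator alloc(
-//       persistent_allocator, kObjectType, kObjectFreeType,
-//       /*object_size=*/1024, /*cache_size=*/10, /*make_iterable=*/true);
-//   ActivityTrackerMemoryAllocator::Reference ref = alloc.GetObjectReference();
-//   if (ref)
-//     alloc.ReleaseObjectReference(ref);  // Returns it to the "free" pool.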
-
-
-// This structure is the full contents recorded for every activity pushed
-// onto the stack. The |activity_type| indicates what is actually stored in
-// the |data| field. All fields must be explicitly sized types to ensure no
-// interoperability problems between 32-bit and 64-bit systems.
-struct Activity {
-  // SHA1(base::debug::Activity): Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId = 0x99425159 + 1;
-  // Expected size for 32/64-bit check. Update this if structure changes!
-  static constexpr size_t kExpectedInstanceSize =
-      48 + 8 * kActivityCallStackSize;
-
-  // The type of an activity on the stack. Activities are broken into
-  // categories with the category ID taking the top 4 bits and the lower
-  // bits representing an action within that category. This combination
-  // makes it easy to "switch" based on the type during analysis.
-  enum Type : uint8_t {
-    // This "null" constant is used to indicate "do not change" in calls.
-    ACT_NULL = 0,
-
-    // Task activities involve callbacks posted to a thread or thread-pool
-    // using the PostTask() method or any of its friends.
-    ACT_TASK = 1 << 4,
-    ACT_TASK_RUN = ACT_TASK,
-
-    // Lock activities involve the acquisition of "mutex" locks.
-    ACT_LOCK = 2 << 4,
-    ACT_LOCK_ACQUIRE = ACT_LOCK,
-    ACT_LOCK_RELEASE,
-
-    // Event activities involve operations on a WaitableEvent.
-    ACT_EVENT = 3 << 4,
-    ACT_EVENT_WAIT = ACT_EVENT,
-    ACT_EVENT_SIGNAL,
-
-    // Thread activities involve the life management of threads.
-    ACT_THREAD = 4 << 4,
-    ACT_THREAD_START = ACT_THREAD,
-    ACT_THREAD_JOIN,
-
-    // Process activities involve the life management of processes.
-    ACT_PROCESS = 5 << 4,
-    ACT_PROCESS_START = ACT_PROCESS,
-    ACT_PROCESS_WAIT,
-
-    // Exception activities indicate the occurrence of something unexpected.
-    ACT_EXCEPTION = 14 << 4,
-
-    // Generic activities are user defined and can be anything.
-    ACT_GENERIC = 15 << 4,
-
-    // These constants can be used to separate the category and action from
-    // a combined activity type.
-    ACT_CATEGORY_MASK = 0xF << 4,
-    ACT_ACTION_MASK = 0xF
-  };
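-
-  // Example (a sketch): the masks above separate a combined type back into
-  // its parts:
-  //
-  //   Activity::Type type = Activity::ACT_LOCK_ACQUIRE;
-  //   uint8_t category = type & ACT_CATEGORY_MASK;  // ACT_LOCK
-  //   uint8_t action = type & ACT_ACTION_MASK;      // 0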
-
-  // Internal representation of time. During collection, this is in "ticks"
-  // but when returned in a snapshot, it is "wall time".
-  int64_t time_internal;
-
-  // The address that pushed the activity onto the stack as a raw number.
-  uint64_t calling_address;
-
-  // The address that is the origin of the activity if it is not obvious from
-  // the call stack. This is useful for things like tasks that are posted
-  // from a completely different thread though most activities will leave
-  // it null.
-  uint64_t origin_address;
-
-  // Array of program-counters that make up the top of the call stack.
-  // Despite the fixed size, this list is always null-terminated. Entries
-  // after the terminator have no meaning and may or may not also be null.
-  // The list will be completely empty if call-stack collection is not
-  // enabled.
-  uint64_t call_stack[kActivityCallStackSize];
-
-  // Reference to arbitrary user data within the persistent memory segment
-  // and a unique identifier for it.
-  uint32_t user_data_ref;
-  uint32_t user_data_id;
-
-  // The (enumerated) type of the activity. This defines what fields of the
-  // |data| record are valid.
-  uint8_t activity_type;
-
-  // Padding to ensure that the next member begins on a 64-bit boundary
-  // even on 32-bit builds which ensures inter-operability between CPU
-  // architectures. New fields can be taken from this space.
-  uint8_t padding[7];
-
-  // Information specific to the |activity_type|.
-  ActivityData data;
-
-  static void FillFrom(Activity* activity,
-                       const void* program_counter,
-                       const void* origin,
-                       Type type,
-                       const ActivityData& data);
-};
-
-// This class manages arbitrary user data that can be associated with activities
-// done by a thread by supporting key/value pairs of any type. This can provide
-// additional information during debugging. It is also used to store arbitrary
-// global data. All updates must be done from the same thread though other
-// threads can read it concurrently if they create new objects using the same
-// memory.
-class BASE_EXPORT ActivityUserData {
- public:
-  // List of known value types. Each REFERENCE type must immediately follow
-  // its non-reference counterpart.
-  enum ValueType : uint8_t {
-    END_OF_VALUES = 0,
-    RAW_VALUE,
-    RAW_VALUE_REFERENCE,
-    STRING_VALUE,
-    STRING_VALUE_REFERENCE,
-    CHAR_VALUE,
-    BOOL_VALUE,
-    SIGNED_VALUE,
-    UNSIGNED_VALUE,
-  };
-
-  class BASE_EXPORT TypedValue {
-   public:
-    TypedValue();
-    TypedValue(const TypedValue& other);
-    ~TypedValue();
-
-    ValueType type() const { return type_; }
-
-    // These methods return the extracted value in the correct format.
-    StringPiece Get() const;
-    StringPiece GetString() const;
-    bool GetBool() const;
-    char GetChar() const;
-    int64_t GetInt() const;
-    uint64_t GetUint() const;
-
-    // These methods return references to process memory as originally provided
-    // to corresponding Set calls. USE WITH CAUTION! There is no guarantee that
-    // the referenced memory is accessible or useful. It's possible that:
-    //  - the memory was free'd and reallocated for a different purpose
-    //  - the memory has been released back to the OS
-    //  - the memory belongs to a different process's address space
-    // Dereferencing the returned StringPiece when the memory is not accessible
-    // will cause the program to SEGV!
-    StringPiece GetReference() const;
-    StringPiece GetStringReference() const;
-
-   private:
-    friend class ActivityUserData;
-
-    ValueType type_ = END_OF_VALUES;
-    uint64_t short_value_;    // Used to hold copy of numbers, etc.
-    std::string long_value_;  // Used to hold copy of raw/string data.
-    StringPiece ref_value_;   // Used to hold reference to external data.
-  };
-
-  using Snapshot = std::map<std::string, TypedValue>;
-
-  // Initialize the object either as a "sink" that just accepts and discards
-  // data or an active one that writes to a given (zeroed) memory block.
-  ActivityUserData();
-  ActivityUserData(void* memory, size_t size, int64_t pid = 0);
-  virtual ~ActivityUserData();
-
-  // Gets the unique ID number for this user data. If this changes then the
-  // contents have been overwritten by another thread. The return value is
-  // always non-zero unless it's actually just a data "sink".
-  uint32_t id() const {
-    return header_ ? header_->owner.data_id.load(std::memory_order_relaxed) : 0;
-  }
-
-  // Writes a |value| (as part of a key/value pair) that will be included with
-  // the activity in any reports. The same |name| can be written multiple times
-  // with each successive call overwriting the previously stored |value|. For
-  // raw and string values, the maximum size of successive writes is limited by
-  // the first call. The length of "name" is limited to 255 characters.
-  //
-  // This information is stored on a "best effort" basis. It may be dropped if
-  // the memory buffer is full or the associated activity is beyond the maximum
-  // recording depth.
-  void Set(StringPiece name, const void* memory, size_t size) {
-    Set(name, RAW_VALUE, memory, size);
-  }
-  void SetString(StringPiece name, StringPiece value) {
-    Set(name, STRING_VALUE, value.data(), value.length());
-  }
-  void SetString(StringPiece name, StringPiece16 value) {
-    SetString(name, UTF16ToUTF8(value));
-  }
-  void SetBool(StringPiece name, bool value) {
-    char cvalue = value ? 1 : 0;
-    Set(name, BOOL_VALUE, &cvalue, sizeof(cvalue));
-  }
-  void SetChar(StringPiece name, char value) {
-    Set(name, CHAR_VALUE, &value, sizeof(value));
-  }
-  void SetInt(StringPiece name, int64_t value) {
-    Set(name, SIGNED_VALUE, &value, sizeof(value));
-  }
-  void SetUint(StringPiece name, uint64_t value) {
-    Set(name, UNSIGNED_VALUE, &value, sizeof(value));
-  }
-
-  // These function as above but don't actually copy the data into the
-  // persistent memory. They store unaltered pointers along with a size. These
-  // can be used in conjunction with a memory dump to find certain large pieces
-  // of information.
-  void SetReference(StringPiece name, const void* memory, size_t size) {
-    SetReference(name, RAW_VALUE_REFERENCE, memory, size);
-  }
-  void SetStringReference(StringPiece name, StringPiece value) {
-    SetReference(name, STRING_VALUE_REFERENCE, value.data(), value.length());
-  }
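-
-  // Example (a sketch): values are written as key/value pairs, and a later
-  // call with the same key overwrites the stored value:
-  //
-  //   ActivityUserData& data = GlobalActivityTracker::Get()->process_data();
-  //   data.SetString("mode", "startup");
-  //   data.SetInt("attempt", 1);
-  //   data.SetInt("attempt", 2);  // Replaces the value stored above.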
-
-  // Creates a snapshot of the key/value pairs contained within. The returned
-  // data will be fixed, independent of whatever changes afterward. There is
-  // some protection against concurrent modification. This will return false
-  // if the data is invalid or if a complete overwrite of the contents is
-  // detected.
-  bool CreateSnapshot(Snapshot* output_snapshot) const;
-
-  // Gets the base memory address used for storing data.
-  const void* GetBaseAddress() const;
-
-  // Explicitly sets the process ID.
-  void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
-
-  // Gets the associated process ID, in native form, and the creation timestamp
-  // from tracker memory without loading the entire structure for analysis. This
-  // will return false if no valid process ID is available.
-  static bool GetOwningProcessId(const void* memory,
-                                 int64_t* out_id,
-                                 int64_t* out_stamp);
-
- protected:
-  virtual void Set(StringPiece name,
-                   ValueType type,
-                   const void* memory,
-                   size_t size);
-
- private:
-  FRIEND_TEST_ALL_PREFIXES(ActivityTrackerTest, UserDataTest);
-
-  enum : size_t { kMemoryAlignment = sizeof(uint64_t) };
-
-  // A structure that defines the structure header in memory.
-  struct MemoryHeader {
-    MemoryHeader();
-    ~MemoryHeader();
-
-    OwningProcess owner;  // Information about the creating process.
-  };
-
-  // Header to a key/value record held in persistent memory.
-  struct FieldHeader {
-    FieldHeader();
-    ~FieldHeader();
-
-    std::atomic<uint8_t> type;         // Encoded ValueType
-    uint8_t name_size;                 // Length of "name" key.
-    std::atomic<uint16_t> value_size;  // Actual size of the stored value.
-    uint16_t record_size;              // Total storage of name, value, header.
-  };
-
-  // A structure used to reference data held outside of persistent memory.
-  struct ReferenceRecord {
-    uint64_t address;
-    uint64_t size;
-  };
-
-  // This record is used to hold known values in a map so that they can be
-  // found and overwritten later.
-  struct ValueInfo {
-    ValueInfo();
-    ValueInfo(ValueInfo&&);
-    ~ValueInfo();
-
-    StringPiece name;                 // The "key" of the record.
-    ValueType type;                   // The type of the value.
-    void* memory;                     // Where the "value" is held.
-    std::atomic<uint16_t>* size_ptr;  // Address of the actual size of value.
-    size_t extent;                    // The total storage of the value,
-  };                                  // typically rounded up for alignment.
-
-  void SetReference(StringPiece name,
-                    ValueType type,
-                    const void* memory,
-                    size_t size);
-
-  // Loads any data already in the memory segment. This allows for accessing
-  // records created previously. If this detects that the underlying data has
-  // gone away (cleared by another thread/process), it will invalidate all the
-  // data in this object and turn it into a simple "sink" with no values to
-  // return.
-  void ImportExistingData() const;
-
-  // A map of all the values within the memory block, keyed by name for quick
-  // updates of the values. This is "mutable" because it changes on "const"
-  // objects even when the actual data values can't change.
-  mutable std::map<StringPiece, ValueInfo> values_;
-
-  // Information about the memory block in which new data can be stored. These
-  // are "mutable" because they change even on "const" objects that are just
-  // skipping already set values.
-  mutable char* memory_;
-  mutable size_t available_;
-
-  // A pointer to the memory header for this instance.
-  MemoryHeader* const header_;
-
-  // These hold values used when initially creating the object. They are
-  // compared against current header values to check for outside changes.
-  const uint32_t orig_data_id;
-  const int64_t orig_process_id;
-  const int64_t orig_create_stamp;
-
-  DISALLOW_COPY_AND_ASSIGN(ActivityUserData);
-};
-
-// This class manages tracking a stack of activities for a single thread in
-// a persistent manner, implementing a bounded-size stack in a fixed-size
-// memory allocation. In order to support an operational mode where another
-// thread is analyzing this data in real-time, atomic operations are used
-// where necessary to guarantee a consistent view from the outside.
-//
-// This class is not generally used directly but instead managed by the
-// GlobalActivityTracker instance and updated using Scoped*Activity local
-// objects.
-class BASE_EXPORT ThreadActivityTracker {
- public:
-  using ActivityId = uint32_t;
-
-  // This structure contains all the common information about the thread so
-  // it doesn't have to be repeated in every entry on the stack. It is defined
-  // and used completely within the .cc file.
-  struct Header;
-
-  // This structure holds a copy of all the internal data at the moment the
-  // "snapshot" operation is done. It is disconnected from the live tracker
-  // so that continued operation of the thread will not cause changes here.
-  struct BASE_EXPORT Snapshot {
-    // Explicit constructor/destructor are needed because of complex types
-    // with non-trivial default constructors and destructors.
-    Snapshot();
-    ~Snapshot();
-
-    // The name of the thread as set when it was created. The name may be
-    // truncated due to internal length limitations.
-    std::string thread_name;
-
-    // The timestamp at which this process was created.
-    int64_t create_stamp;
-
-    // The process and thread IDs. These values have no meaning other than
-    // they uniquely identify a running process and a running thread within
-    // that process.  Thread-IDs can be re-used across different processes
-    // and both can be re-used after the process/thread exits.
-    int64_t process_id = 0;
-    int64_t thread_id = 0;
-
-    // The current stack of activities that are underway for this thread. It
-    // is limited in its maximum size with later entries being left off.
-    std::vector<Activity> activity_stack;
-
-    // The current total depth of the activity stack, including those later
-    // entries not recorded in the |activity_stack| vector.
-    uint32_t activity_stack_depth = 0;
-
-    // The last recorded "exception" activity.
-    Activity last_exception;
-  };
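-
-  // Example (a sketch, assuming a valid |tracker|): an analyzer copies the
-  // live state and then walks the recorded stack:
-  //
-  //   ThreadActivityTracker::Snapshot snapshot;
-  //   if (tracker->CreateSnapshot(&snapshot)) {
-  //     for (const Activity& activity : snapshot.activity_stack)
-  //       Examine(activity);  // |Examine| is a hypothetical helper.
-  //   }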
-
-  // This is the base class for having the compiler manage an activity on the
-  // tracker's stack. It does nothing but call methods on the passed |tracker|
-  // if it is not null, making it safe (and cheap) to create these objects
-  // even if activity tracking is not enabled.
-  class BASE_EXPORT ScopedActivity {
-   public:
-    ScopedActivity(ThreadActivityTracker* tracker,
-                   const void* program_counter,
-                   const void* origin,
-                   Activity::Type type,
-                   const ActivityData& data);
-    ~ScopedActivity();
-
-    // Changes some basic metadata about the activity.
-    void ChangeTypeAndData(Activity::Type type, const ActivityData& data);
-
-   protected:
-    // The thread tracker to which this object reports. It can be null if
-    // activity tracking is not (yet) enabled.
-    ThreadActivityTracker* const tracker_;
-
-    // An identifier that indicates a specific activity on the stack.
-    ActivityId activity_id_;
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(ScopedActivity);
-  };
-
-  // A ThreadActivityTracker runs on top of memory that is managed externally.
-  // It must be large enough for the internal header and a few Activity
-  // blocks. See SizeForStackDepth().
-  ThreadActivityTracker(void* base, size_t size);
-  virtual ~ThreadActivityTracker();
-
-  // Indicates that an activity has started from a given |origin| address in
-  // the code, though it can be null if the creator's address is not known.
-  // The |type| and |data| describe the activity. |program_counter| should be
-  // the result of GetProgramCounter() where push is called. Returned is an
-  // ID that can be used to adjust the pushed activity.
-  ActivityId PushActivity(const void* program_counter,
-                          const void* origin,
-                          Activity::Type type,
-                          const ActivityData& data);
-
-  // An inlined version of the above that gets the program counter where it
-  // is called.
-  ALWAYS_INLINE
-  ActivityId PushActivity(const void* origin,
-                          Activity::Type type,
-                          const ActivityData& data) {
-    return PushActivity(GetProgramCounter(), origin, type, data);
-  }
-
-  // Changes the activity |type| and |data| of the top-most entry on the stack.
-  // This is useful if the information has changed and it is desirable to
-  // track that change without creating a new stack entry. If the type is
-  // ACT_NULL or the data is kNullActivityData then that value will remain
-  // unchanged. The type, if changed, must remain in the same category.
-  // Changing both is not atomic so a snapshot operation could occur between
-  // the update of |type| and |data| or between update of |data| fields.
-  void ChangeActivity(ActivityId id,
-                      Activity::Type type,
-                      const ActivityData& data);
-
-  // Indicates that an activity has completed.
-  void PopActivity(ActivityId id);
-
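-  // Illustrative push/pop sketch (assuming a valid |tracker| and a |lock|
-  // object being tracked):
-  //
-  //   ThreadActivityTracker::ActivityId id = tracker->PushActivity(
-  //       origin, Activity::ACT_LOCK_ACQUIRE, ActivityData::ForLock(&lock));
-  //   ...acquire the lock...
-  //   tracker->PopActivity(id);
-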
-  // Sets the user-data information for an activity.
-  std::unique_ptr<ActivityUserData> GetUserData(
-      ActivityId id,
-      ActivityTrackerMemoryAllocator* allocator);
-
-  // Returns whether there is real user-data associated with a given ActivityId,
-  // since it's possible that any returned object is just a sink.
-  bool HasUserData(ActivityId id);
-
-  // Release the user-data information for an activity.
-  void ReleaseUserData(ActivityId id,
-                       ActivityTrackerMemoryAllocator* allocator);
-
-  // Save an exception. |origin| is the location of the exception.
-  void RecordExceptionActivity(const void* program_counter,
-                               const void* origin,
-                               Activity::Type type,
-                               const ActivityData& data);
-
-  // Returns whether the current data is valid or not. It is not valid if
-  // corruption has been detected in the header or other data structures.
-  bool IsValid() const;
-
-  // Gets a copy of the tracker contents for analysis. Returns false if a
-  // snapshot was not possible, perhaps because the data is not valid; the
-  // contents of |output_snapshot| are undefined in that case. The current
-  // implementation does not support concurrent snapshot operations.
-  bool CreateSnapshot(Snapshot* output_snapshot) const;
-
-  // Gets the base memory address used for storing data.
-  const void* GetBaseAddress();
-
-  // Access the "data version" value so tests can determine if an activity
-  // was pushed and popped in a single call.
-  uint32_t GetDataVersionForTesting();
-
-  // Explicitly sets the process ID.
-  void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
-
-  // Gets the associated process ID, in native form, and the creation timestamp
-  // from tracker memory without loading the entire structure for analysis. This
-  // will return false if no valid process ID is available.
-  static bool GetOwningProcessId(const void* memory,
-                                 int64_t* out_id,
-                                 int64_t* out_stamp);
-
-  // Calculates the memory size required for a given stack depth, including
-  // the internal header structure for the stack.
-  static size_t SizeForStackDepth(int stack_depth);
-
- private:
-  friend class ActivityTrackerTest;
-
-  bool CalledOnValidThread();
-
-  std::unique_ptr<ActivityUserData> CreateUserDataForActivity(
-      Activity* activity,
-      ActivityTrackerMemoryAllocator* allocator);
-
-  Header* const header_;        // Pointer to the Header structure.
-  Activity* const stack_;       // The stack of activities.
-
-#if DCHECK_IS_ON()
-  // The ActivityTracker is thread bound, and will be invoked across all the
-  // sequences that run on the thread. A ThreadChecker does not work here, as it
-  // asserts on running in the same sequence each time.
-  const PlatformThreadRef thread_id_;  // The thread this instance is bound to.
-#endif
-  const uint32_t stack_slots_;  // The total number of stack slots.
-
-  bool valid_ = false;          // Tracks whether the data is valid or not.
-
-  DISALLOW_COPY_AND_ASSIGN(ThreadActivityTracker);
-};
-
-
-// The global tracker manages all the individual thread trackers. Memory for
-// the thread trackers is taken from a PersistentMemoryAllocator which allows
-// for the data to be analyzed by a parallel process or even post-mortem.
-class BASE_EXPORT GlobalActivityTracker {
- public:
-  // Type identifiers used when storing in persistent memory so they can be
-  // identified during extraction; the first 4 bytes of the SHA1 of the name
-  // are used as a unique integer. A "version number" is added to the base
-  // so that, if the structure of that object changes, stored older versions
-  // will be safely ignored. These are public so that an external process
-  // can recognize records of this type within an allocator.
-  enum : uint32_t {
-    kTypeIdActivityTracker = 0x5D7381AF + 4,   // SHA1(ActivityTracker) v4
-    kTypeIdUserDataRecord = 0x615EDDD7 + 3,    // SHA1(UserDataRecord) v3
-    kTypeIdGlobalLogMessage = 0x4CF434F9 + 1,  // SHA1(GlobalLogMessage) v1
-    kTypeIdProcessDataRecord = kTypeIdUserDataRecord + 0x100,
-
-    kTypeIdActivityTrackerFree = ~kTypeIdActivityTracker,
-    kTypeIdUserDataRecordFree = ~kTypeIdUserDataRecord,
-    kTypeIdProcessDataRecordFree = ~kTypeIdProcessDataRecord,
-  };
-
-  // An enumeration of common process life stages. All entries are given an
-  // explicit number so they are known and remain constant; this allows for
-  // cross-version analysis either locally or on a server.
-  enum ProcessPhase : int {
-    // The phases are generic and may have meaning to the tracker.
-    PROCESS_PHASE_UNKNOWN = 0,
-    PROCESS_LAUNCHED = 1,
-    PROCESS_LAUNCH_FAILED = 2,
-    PROCESS_EXITED_CLEANLY = 10,
-    PROCESS_EXITED_WITH_CODE = 11,
-
-    // Add here whatever is useful for analysis.
-    PROCESS_SHUTDOWN_STARTED = 100,
-    PROCESS_MAIN_LOOP_STARTED = 101,
-  };
-
-  // A callback made when a process exits to allow immediate analysis of its
-  // data. Note that the system may reuse the |process_id| so when fetching
-  // records it's important to ensure that what is returned was created before
-  // the |exit_stamp|. Movement of |process_data| information is allowed.
-  using ProcessExitCallback =
-      Callback<void(int64_t process_id,
-                    int64_t exit_stamp,
-                    int exit_code,
-                    ProcessPhase exit_phase,
-                    std::string&& command_line,
-                    ActivityUserData::Snapshot&& process_data)>;
-
-  // This structure contains information about a loaded module, as shown to
-  // users of the tracker.
-  struct BASE_EXPORT ModuleInfo {
-    ModuleInfo();
-    ModuleInfo(ModuleInfo&& rhs);
-    ModuleInfo(const ModuleInfo& rhs);
-    ~ModuleInfo();
-
-    ModuleInfo& operator=(ModuleInfo&& rhs);
-    ModuleInfo& operator=(const ModuleInfo& rhs);
-
-    // Information about where and when the module was loaded/unloaded.
-    bool is_loaded = false;  // Was the last operation a load or unload?
-    uintptr_t address = 0;   // Address of the last load operation.
-    int64_t load_time = 0;   // Time of last change; set automatically.
-
-    // Information about the module itself. These never change no matter how
-    // many times a module may be loaded and unloaded.
-    size_t size = 0;         // The size of the loaded module.
-    uint32_t timestamp = 0;  // Opaque "timestamp" for the module.
-    uint32_t age = 0;        // Opaque "age" for the module.
-    uint8_t identifier[16];  // Opaque identifier (GUID, etc.) for the module.
-    std::string file;        // The full path to the file. (UTF-8)
-    std::string debug_file;  // The full path to the debug file.
-  };
-
-  // This is a thin wrapper around the thread-tracker's ScopedActivity that
-  // allows thread-safe access to data values. It is safe to use even if
-  // activity tracking is not enabled.
-  class BASE_EXPORT ScopedThreadActivity
-      : public ThreadActivityTracker::ScopedActivity {
-   public:
-    ScopedThreadActivity(const void* program_counter,
-                         const void* origin,
-                         Activity::Type type,
-                         const ActivityData& data,
-                         bool lock_allowed);
-    ~ScopedThreadActivity();
-
-    // Returns an object for manipulating user data.
-    ActivityUserData& user_data();
-
-   private:
-    // Gets (or creates) a tracker for the current thread. If locking is not
-    // allowed (because a lock is being tracked which would cause recursion)
-    // then the attempt to create one, if none is found, will be skipped. Once
-    // the tracker for this thread has been created for other reasons, locks
-    // will be tracked. The thread-tracker uses locks.
-    static ThreadActivityTracker* GetOrCreateTracker(bool lock_allowed) {
-      GlobalActivityTracker* global_tracker = Get();
-      if (!global_tracker)
-        return nullptr;
-      if (lock_allowed)
-        return global_tracker->GetOrCreateTrackerForCurrentThread();
-      else
-        return global_tracker->GetTrackerForCurrentThread();
-    }
-
-    // An object that manages additional user data, created only upon request.
-    std::unique_ptr<ActivityUserData> user_data_;
-
-    DISALLOW_COPY_AND_ASSIGN(ScopedThreadActivity);
-  };
-
-  ~GlobalActivityTracker();
-
-  // Creates a global tracker using a given persistent-memory |allocator| and
-  // providing the given |stack_depth| to each thread tracker it manages. The
-  // created object is activated so tracking will begin immediately upon return.
-  // The |process_id| can be zero to get it from the OS, but it can be passed
-  // explicitly for testing purposes.
-  static void CreateWithAllocator(
-      std::unique_ptr<PersistentMemoryAllocator> allocator,
-      int stack_depth,
-      int64_t process_id);
-
-#if !defined(OS_NACL)
-  // Like above but internally creates an allocator around a disk file with
-  // the specified |size| at the given |file_path|. Any existing file will be
-  // overwritten. The |id| and |name| are arbitrary and stored in the allocator
-  // for reference by whatever process reads it. Returns true if successful.
-  static bool CreateWithFile(const FilePath& file_path,
-                             size_t size,
-                             uint64_t id,
-                             StringPiece name,
-                             int stack_depth);
-#endif  // !defined(OS_NACL)
-
-  // Like above but internally creates an allocator using local heap memory of
-  // the specified size. This is used primarily for unit tests. The |process_id|
-  // can be zero to get it from the OS or passed explicitly for testing.
-  static bool CreateWithLocalMemory(size_t size,
-                                    uint64_t id,
-                                    StringPiece name,
-                                    int stack_depth,
-                                    int64_t process_id);
-
-  // Like above but internally creates an allocator using a shared-memory
-  // segment. The segment must already be mapped into the local memory space.
-  static bool CreateWithSharedMemory(std::unique_ptr<SharedMemory> shm,
-                                     uint64_t id,
-                                     StringPiece name,
-                                     int stack_depth);
-
-  // Like above but takes a handle to an existing shared memory segment and
-  // maps it before creating the tracker.
-  static bool CreateWithSharedMemoryHandle(const SharedMemoryHandle& handle,
-                                           size_t size,
-                                           uint64_t id,
-                                           StringPiece name,
-                                           int stack_depth);
-
-  // Gets the global activity-tracker or null if none exists.
-  static GlobalActivityTracker* Get() {
-    return reinterpret_cast<GlobalActivityTracker*>(
-        subtle::Acquire_Load(&g_tracker_));
-  }
-
-  // Sets the global activity-tracker for testing purposes.
-  static void SetForTesting(std::unique_ptr<GlobalActivityTracker> tracker);
-
-  // This exists for testing only; it extracts the global tracker completely.
-  // All tracked threads must exit before
-  // calling this. Tracking for the current thread will be automatically
-  // stopped.
-  static std::unique_ptr<GlobalActivityTracker> ReleaseForTesting();
-
-  // Convenience method for determining if a global tracker is active.
-  static bool IsEnabled() { return Get() != nullptr; }
-
-  // Gets the persistent-memory-allocator in which data is stored. Callers
-  // can store additional records here to pass more information to the
-  // analysis process.
-  PersistentMemoryAllocator* allocator() { return allocator_.get(); }
-
-  // Gets the thread's activity-tracker if it exists. This is inline for
-  // performance reasons and it uses thread-local-storage (TLS) so that there
-  // is no significant lookup time required to find the one for the calling
-  // thread. Ownership remains with the global tracker.
-  ThreadActivityTracker* GetTrackerForCurrentThread() {
-    return reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
-  }
-
-  // Gets the thread's activity-tracker or creates one if none exists. This
-  // is inline for performance reasons. Ownership remains with the global
-  // tracker.
-  ThreadActivityTracker* GetOrCreateTrackerForCurrentThread() {
-    ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
-    if (tracker)
-      return tracker;
-    return CreateTrackerForCurrentThread();
-  }
-
-  // Creates an activity-tracker for the current thread.
-  ThreadActivityTracker* CreateTrackerForCurrentThread();
-
-  // Releases the activity-tracker for the current thread (for testing only).
-  void ReleaseTrackerForCurrentThreadForTesting();
-
-  // Sets a task-runner that can be used for background work.
-  void SetBackgroundTaskRunner(const scoped_refptr<TaskRunner>& runner);
-
-  // Sets an optional callback to be called when a process exits.
-  void SetProcessExitCallback(ProcessExitCallback callback);
-
-  // Manages process lifetimes. These are called by the process that launched
-  // and reaped the subprocess, not the subprocess itself. If it is expensive
-  // to generate the parameters, Get() the global tracker and call these
-  // conditionally rather than using the static versions.
-  void RecordProcessLaunch(ProcessId process_id,
-                           const FilePath::StringType& cmd);
-  void RecordProcessLaunch(ProcessId process_id,
-                           const FilePath::StringType& exe,
-                           const FilePath::StringType& args);
-  void RecordProcessExit(ProcessId process_id, int exit_code);
-  static void RecordProcessLaunchIfEnabled(ProcessId process_id,
-                                           const FilePath::StringType& cmd) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordProcessLaunch(process_id, cmd);
-  }
-  static void RecordProcessLaunchIfEnabled(ProcessId process_id,
-                                           const FilePath::StringType& exe,
-                                           const FilePath::StringType& args) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordProcessLaunch(process_id, exe, args);
-  }
-  static void RecordProcessExitIfEnabled(ProcessId process_id, int exit_code) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordProcessExit(process_id, exit_code);
-  }
-
-  // Sets the "phase" of the current process, useful for knowing what it was
-  // doing when it last reported.
-  void SetProcessPhase(ProcessPhase phase);
-  static void SetProcessPhaseIfEnabled(ProcessPhase phase) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->SetProcessPhase(phase);
-  }
-
-  // Records a log message. The current implementation does NOT recycle these,
-  // so only store critical messages such as FATAL ones.
-  void RecordLogMessage(StringPiece message);
-  static void RecordLogMessageIfEnabled(StringPiece message) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordLogMessage(message);
-  }
-
-  // Records a module load/unload event. This is safe to call multiple times
-  // even with the same information.
-  void RecordModuleInfo(const ModuleInfo& info);
-  static void RecordModuleInfoIfEnabled(const ModuleInfo& info) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordModuleInfo(info);
-  }
-
-  // Record field trial information. This call is thread-safe. In addition to
-  // this, construction of a GlobalActivityTracker will cause all existing
-  // active field trials to be fetched and recorded.
-  void RecordFieldTrial(const std::string& trial_name, StringPiece group_name);
-  static void RecordFieldTrialIfEnabled(const std::string& trial_name,
-                                        StringPiece group_name) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordFieldTrial(trial_name, group_name);
-  }
-
-  // Record exception information for the current thread.
-  ALWAYS_INLINE
-  void RecordException(const void* origin, uint32_t code) {
-    return RecordExceptionImpl(GetProgramCounter(), origin, code);
-  }
-  void RecordException(const void* pc, const void* origin, uint32_t code);
-
-  // Marks the tracked data as deleted.
-  void MarkDeleted();
-
-  // Gets the process ID used for tracking. This is typically the same as what
-  // the OS thinks is the current process but can be overridden for testing.
-  int64_t process_id() { return process_id_; }
-
-  // Accesses the process data record for storing arbitrary key/value pairs.
-  // Updates to this are thread-safe.
-  ActivityUserData& process_data() { return process_data_; }
-
- private:
-  friend class GlobalActivityAnalyzer;
-  friend class ScopedThreadActivity;
-  friend class ActivityTrackerTest;
-
-  enum : int {
-    // The maximum number of threads that can be tracked within a process. If
-    // more than this number run concurrently, tracking of new ones may cease.
-    kMaxThreadCount = 100,
-    kCachedThreadMemories = 10,
-    kCachedUserDataMemories = 10,
-  };
-
-  // A wrapper around ActivityUserData that is thread-safe and thus can be used
-  // in the global scope without the requirement of being called from only one
-  // thread.
-  class ThreadSafeUserData : public ActivityUserData {
-   public:
-    ThreadSafeUserData(void* memory, size_t size, int64_t pid = 0);
-    ~ThreadSafeUserData() override;
-
-   private:
-    void Set(StringPiece name,
-             ValueType type,
-             const void* memory,
-             size_t size) override;
-
-    Lock data_lock_;
-
-    DISALLOW_COPY_AND_ASSIGN(ThreadSafeUserData);
-  };
-
-  // State of a module as stored in persistent memory. This supports a single
-  // loading of a module only. If modules are loaded multiple times at
-  // different addresses, only the last will be recorded and an unload will
-  // not revert to the information of any other addresses.
-  struct BASE_EXPORT ModuleInfoRecord {
-    // SHA1(ModuleInfoRecord): Increment this if structure changes!
-    static constexpr uint32_t kPersistentTypeId = 0x05DB5F41 + 1;
-
-    // Expected size for 32/64-bit check by PersistentMemoryAllocator.
-    static constexpr size_t kExpectedInstanceSize =
-        OwningProcess::kExpectedInstanceSize + 56;
-
-    // The atomic unfortunately makes this a "complex" class on some compilers
-    // and thus requires an out-of-line constructor & destructor even though
-    // they do nothing.
-    ModuleInfoRecord();
-    ~ModuleInfoRecord();
-
-    OwningProcess owner;            // The process that created this record.
-    uint64_t address;               // The base address of the module.
-    uint64_t load_time;             // Time of last load/unload.
-    uint64_t size;                  // The size of the module in bytes.
-    uint32_t timestamp;             // Opaque timestamp of the module.
-    uint32_t age;                   // Opaque "age" associated with the module.
-    uint8_t identifier[16];         // Opaque identifier for the module.
-    std::atomic<uint32_t> changes;  // Number of load/unload actions.
-    uint16_t pickle_size;           // The size of the following pickle.
-    uint8_t loaded;                 // Flag if module is loaded or not.
-    char pickle[1];                 // Other strings; may allocate larger.
-
-    // Decodes/encodes storage structure from more generic info structure.
-    bool DecodeTo(GlobalActivityTracker::ModuleInfo* info,
-                  size_t record_size) const;
-    static ModuleInfoRecord* CreateFrom(
-        const GlobalActivityTracker::ModuleInfo& info,
-        PersistentMemoryAllocator* allocator);
-
-    // Updates the core information without changing the encoded strings. This
-    // is useful when a known module changes state (i.e. new load or unload).
-    bool UpdateFrom(const GlobalActivityTracker::ModuleInfo& info);
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(ModuleInfoRecord);
-  };
-
-  // A thin wrapper around the main thread-tracker that keeps additional
-  // information that the global tracker needs to handle joined threads.
-  class ManagedActivityTracker : public ThreadActivityTracker {
-   public:
-    ManagedActivityTracker(PersistentMemoryAllocator::Reference mem_reference,
-                           void* base,
-                           size_t size);
-    ~ManagedActivityTracker() override;
-
-    // The reference into persistent memory from which the thread-tracker's
-    // memory was created.
-    const PersistentMemoryAllocator::Reference mem_reference_;
-
-    // The physical address used for the thread-tracker's memory.
-    void* const mem_base_;
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(ManagedActivityTracker);
-  };
-
-  // Creates a global tracker using a given persistent-memory |allocator| and
-  // providing the given |stack_depth| to each thread tracker it manages. The
-  // created object is activated so tracking has already started upon return.
-  // The |process_id| can be zero to get it from the OS but can be set
-  // explicitly for testing purposes.
-  GlobalActivityTracker(std::unique_ptr<PersistentMemoryAllocator> allocator,
-                        int stack_depth,
-                        int64_t process_id);
-
-  // Returns the memory used by an activity-tracker managed by this class.
-  // It is called during the destruction of a ManagedActivityTracker object.
-  void ReturnTrackerMemory(ManagedActivityTracker* tracker);
-
-  // Records exception information.
-  void RecordExceptionImpl(const void* pc, const void* origin, uint32_t code);
-
-  // Releases the activity-tracker associated with a thread. It is called
-  // automatically when a thread is joined and thus there is nothing more to
-  // be tracked. |value| is a pointer to a ManagedActivityTracker.
-  static void OnTLSDestroy(void* value);
-
-  // Does process-exit work. This can be run on any thread.
-  void CleanupAfterProcess(int64_t process_id,
-                           int64_t exit_stamp,
-                           int exit_code,
-                           std::string&& command_line);
-
-  // The persistent-memory allocator from which the memory for all trackers
-  // is taken.
-  std::unique_ptr<PersistentMemoryAllocator> allocator_;
-
-  // The size (in bytes) of memory required by a ThreadActivityTracker to
-  // provide the stack-depth requested during construction.
-  const size_t stack_memory_size_;
-
-  // The process-id of the current process. This is kept as a member variable,
-  // defined during initialization, for testing purposes.
-  const int64_t process_id_;
-
-  // The activity tracker for the currently executing thread.
-  ThreadLocalStorage::Slot this_thread_tracker_;
-
-  // The number of thread trackers currently active.
-  std::atomic<int> thread_tracker_count_;
-
-  // A caching memory allocator for thread-tracker objects.
-  ActivityTrackerMemoryAllocator thread_tracker_allocator_;
-  Lock thread_tracker_allocator_lock_;
-
-  // A caching memory allocator for user data attached to activity data.
-  ActivityTrackerMemoryAllocator user_data_allocator_;
-  Lock user_data_allocator_lock_;
-
-  // An object for holding arbitrary key/value pairs with thread-safe access.
-  ThreadSafeUserData process_data_;
-
-  // A map of global module information, keyed by module path.
-  std::map<const std::string, ModuleInfoRecord*> modules_;
-  Lock modules_lock_;
-
-  // The active global activity tracker.
-  static subtle::AtomicWord g_tracker_;
-
-  // A lock that is used to protect access to the following fields.
-  Lock global_tracker_lock_;
-
-  // The collection of processes being tracked and their command-lines.
-  std::map<int64_t, std::string> known_processes_;
-
-  // A task-runner that can be used for doing background processing.
-  scoped_refptr<TaskRunner> background_task_runner_;
-
-  // A callback performed when a subprocess exits, including its exit-code
-  // and the phase it was in when that occurred. This will be called via
-  // the |background_task_runner_| if one is set or whatever thread reaped
-  // the process otherwise.
-  ProcessExitCallback process_exit_callback_;
-
-  DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker);
-};
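A minimal usage sketch (not from the deleted source) of the class above: the static *IfEnabled wrappers null-check Get() internally, so cheap parameters go through them directly, while expensive parameters are computed only after fetching the tracker, as the comment on RecordProcessLaunch() recommends. BuildDiagnosticString() is a hypothetical helper.

    #include "base/debug/activity_tracker.h"

    void OnChildLaunched(base::ProcessId pid) {
      using base::debug::GlobalActivityTracker;
      // Cheap parameter: the static wrapper null-checks the tracker itself.
      GlobalActivityTracker::RecordProcessLaunchIfEnabled(
          pid, FILE_PATH_LITERAL("child --type=renderer"));
      // Expensive parameter: compute it only if a tracker exists.
      if (GlobalActivityTracker* tracker = GlobalActivityTracker::Get())
        tracker->RecordLogMessage(BuildDiagnosticString());  // hypothetical
    }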
-
-
-// Records entry into and exit from an arbitrary block of code.
-class BASE_EXPORT ScopedActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  // Track activity at the specified FROM_HERE location for an arbitrary
-  // 4-bit |action|, an arbitrary 32-bit |id|, and 32-bits of arbitrary
-  // |info|. None of these values affect operation; they're all purely
-  // for association and analysis. To have unique identifiers across a
-  // diverse code-base, create the number by taking the first 8 characters
-  // of the hash of the activity being tracked.
-  //
-  // For example:
-  //   Tracking method: void MayNeverExit(int32_t foo) {...}
-  //   echo -n "MayNeverExit" | sha1sum   =>   e44873ccab21e2b71270da24aa1...
-  //
-  //   void MayNeverExit(int32_t foo) {
-  //     base::debug::ScopedActivity track_me(0, 0xE44873CC, foo);
-  //     ...
-  //   }
-  ALWAYS_INLINE
-  ScopedActivity(uint8_t action, uint32_t id, int32_t info)
-      : ScopedActivity(GetProgramCounter(), action, id, info) {}
-  ScopedActivity() : ScopedActivity(0, 0, 0) {}
-
-  // Changes the |action| and/or |info| of this activity on the stack. This
-  // is useful for tracking progress through a function, updating the action
-  // to indicate "milestones" in the block (max 16 milestones: 0-15) or the
-  // info to reflect other changes. Changing both is not atomic so a snapshot
-  // operation could occur between the update of |action| and |info|.
-  void ChangeAction(uint8_t action);
-  void ChangeInfo(int32_t info);
-  void ChangeActionAndInfo(uint8_t action, int32_t info);
-
- private:
-  // Constructs the object using a passed-in program-counter.
-  ScopedActivity(const void* program_counter,
-                 uint8_t action,
-                 uint32_t id,
-                 int32_t info);
-
-  // A copy of the ID code so it doesn't have to be passed by the caller when
-  // changing the |info| field.
-  uint32_t id_;
-
-  DISALLOW_COPY_AND_ASSIGN(ScopedActivity);
-};
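A hedged sketch of the milestone pattern the comments above describe: |action| is bumped at each stage, so a snapshot of a hung process shows the last milestone reached. OpenDatabase(), CopyRows(), and RowsCopied() are hypothetical helpers.

    void ImportEverything() {
      // 0xE44873CC: first 8 hex chars of sha1("MayNeverExit"), per the recipe
      // in the class comment above.
      base::debug::ScopedActivity activity(/*action=*/0, /*id=*/0xE44873CC,
                                           /*info=*/0);
      OpenDatabase();            // hypothetical
      activity.ChangeAction(1);  // milestone 1: database open
      CopyRows();                // hypothetical
      activity.ChangeActionAndInfo(2, RowsCopied());  // hypothetical row count
    }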
-
-
-// These "scoped" classes provide easy tracking of various blocking actions.
-
-class BASE_EXPORT ScopedTaskRunActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  ALWAYS_INLINE
-  explicit ScopedTaskRunActivity(const PendingTask& task)
-      : ScopedTaskRunActivity(GetProgramCounter(), task) {}
-
- private:
-  ScopedTaskRunActivity(const void* program_counter, const PendingTask& task);
-  DISALLOW_COPY_AND_ASSIGN(ScopedTaskRunActivity);
-};
-
-class BASE_EXPORT ScopedLockAcquireActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  ALWAYS_INLINE
-  explicit ScopedLockAcquireActivity(const base::internal::LockImpl* lock)
-      : ScopedLockAcquireActivity(GetProgramCounter(), lock) {}
-
- private:
-  ScopedLockAcquireActivity(const void* program_counter,
-                            const base::internal::LockImpl* lock);
-  DISALLOW_COPY_AND_ASSIGN(ScopedLockAcquireActivity);
-};
-
-class BASE_EXPORT ScopedEventWaitActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  ALWAYS_INLINE
-  explicit ScopedEventWaitActivity(const WaitableEvent* event)
-      : ScopedEventWaitActivity(GetProgramCounter(), event) {}
-
- private:
-  ScopedEventWaitActivity(const void* program_counter,
-                          const WaitableEvent* event);
-  DISALLOW_COPY_AND_ASSIGN(ScopedEventWaitActivity);
-};
-
-class BASE_EXPORT ScopedThreadJoinActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  ALWAYS_INLINE
-  explicit ScopedThreadJoinActivity(const PlatformThreadHandle* thread)
-      : ScopedThreadJoinActivity(GetProgramCounter(), thread) {}
-
- private:
-  ScopedThreadJoinActivity(const void* program_counter,
-                           const PlatformThreadHandle* thread);
-  DISALLOW_COPY_AND_ASSIGN(ScopedThreadJoinActivity);
-};
-
-// Some systems don't have base::Process
-#if !defined(OS_NACL) && !defined(OS_IOS)
-class BASE_EXPORT ScopedProcessWaitActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  ALWAYS_INLINE
-  explicit ScopedProcessWaitActivity(const Process* process)
-      : ScopedProcessWaitActivity(GetProgramCounter(), process) {}
-
- private:
-  ScopedProcessWaitActivity(const void* program_counter,
-                            const Process* process);
-  DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity);
-};
-#endif
-
-}  // namespace debug
-}  // namespace base
-
-#endif  // BASE_DEBUG_ACTIVITY_TRACKER_H_
diff --git a/base/debug/task_annotator.cc b/base/debug/task_annotator.cc
index def2d7a..69b9c04 100644
--- a/base/debug/task_annotator.cc
+++ b/base/debug/task_annotator.cc
@@ -6,7 +6,6 @@
 
 #include <array>
 
-#include "base/debug/activity_tracker.h"
 #include "base/debug/alias.h"
 #include "base/no_destructor.h"
 #include "base/pending_task.h"
@@ -54,8 +53,6 @@
 
 void TaskAnnotator::RunTask(const char* queue_function,
                             PendingTask* pending_task) {
-  ScopedTaskRunActivity task_activity(*pending_task);
-
   // Before running the task, store the task backtrace with the chain of
   // PostTasks that resulted in this call and deliberately alias it to ensure
   // it is on the stack if the task crashes. Be careful not to assume that the
diff --git a/base/feature_list.cc b/base/feature_list.cc
deleted file mode 100644
index 1610eec..0000000
--- a/base/feature_list.cc
+++ /dev/null
@@ -1,438 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/feature_list.h"
-
-#include <stddef.h>
-
-#include <utility>
-#include <vector>
-
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/metrics/field_trial.h"
-#include "base/pickle.h"
-#include "base/strings/string_split.h"
-#include "base/strings/string_util.h"
-
-namespace base {
-
-namespace {
-
-// Pointer to the FeatureList instance singleton that was set via
-// FeatureList::SetInstance(). Does not use base/memory/singleton.h in order to
-// have more control over initialization timing. Leaky.
-FeatureList* g_feature_list_instance = nullptr;
-
-// Tracks whether the FeatureList instance was initialized via an accessor.
-bool g_initialized_from_accessor = false;
-
-// An allocator entry for a feature in shared memory. The FeatureEntry is
-// followed by a base::Pickle object that contains the feature and trial name.
-struct FeatureEntry {
-  // SHA1(FeatureEntry): Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId = 0x06567CA6 + 1;
-
-  // Expected size for 32/64-bit check.
-  static constexpr size_t kExpectedInstanceSize = 8;
-
-  // Specifies whether a feature override enables or disables the feature. Same
-  // values as the OverrideState enum in feature_list.h
-  uint32_t override_state;
-
-  // Size of the pickled structure, NOT the total size of this entry.
-  uint32_t pickle_size;
-
-  // Reads the feature and trial name from the pickle. Calling this is only
-  // valid on an initialized entry that's in shared memory.
-  bool GetFeatureAndTrialName(StringPiece* feature_name,
-                              StringPiece* trial_name) const {
-    const char* src =
-        reinterpret_cast<const char*>(this) + sizeof(FeatureEntry);
-
-    Pickle pickle(src, pickle_size);
-    PickleIterator pickle_iter(pickle);
-
-    if (!pickle_iter.ReadStringPiece(feature_name))
-      return false;
-
-    // Return true because we are not guaranteed to have a trial name anyway.
-    auto sink = pickle_iter.ReadStringPiece(trial_name);
-    ALLOW_UNUSED_LOCAL(sink);
-    return true;
-  }
-};
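For orientation, a hedged sketch of how the pickle read above is produced (mirroring AddFeaturesToAllocator() further down in this file): the feature name is written first and the trial name is optional, which is why the second ReadStringPiece() result is ignored. The names used are placeholders.

    void WriteFeaturePayload(base::Pickle* pickle) {
      pickle->WriteString("MyGreatFeature");  // feature name: always present
      pickle->WriteString("MyTrial");         // trial name: optional
      // The entry occupies sizeof(FeatureEntry) header bytes followed by
      // pickle->size() bytes of pickle->data().
    }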
-
-// Some characters are not allowed to appear in feature names or the associated
-// field trial names, as they are used as special characters for command-line
-// serialization. This function checks that the strings are ASCII (since they
-// are used in command-line API functions that require ASCII) and whether there
-// are any reserved characters present, returning true if the string is valid.
-// Only called in DCHECKs.
-bool IsValidFeatureOrFieldTrialName(const std::string& name) {
-  return IsStringASCII(name) && name.find_first_of(",<*") == std::string::npos;
-}
-
-}  // namespace
-
-#if DCHECK_IS_CONFIGURABLE
-const Feature kDCheckIsFatalFeature{"DcheckIsFatal",
-                                    base::FEATURE_DISABLED_BY_DEFAULT};
-#endif  // DCHECK_IS_CONFIGURABLE
-
-FeatureList::FeatureList() = default;
-
-FeatureList::~FeatureList() = default;
-
-void FeatureList::InitializeFromCommandLine(
-    const std::string& enable_features,
-    const std::string& disable_features) {
-  DCHECK(!initialized_);
-
-  // Process disabled features first, so that disabled ones take precedence over
-  // enabled ones (since RegisterOverride() uses insert()).
-  RegisterOverridesFromCommandLine(disable_features, OVERRIDE_DISABLE_FEATURE);
-  RegisterOverridesFromCommandLine(enable_features, OVERRIDE_ENABLE_FEATURE);
-
-  initialized_from_command_line_ = true;
-}
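A hedged example of the ordering above: because disabled features are registered first and RegisterOverride() never overwrites an existing entry, a feature named on both lists stays disabled.

    void InitForExample() {
      base::FeatureList feature_list;
      feature_list.InitializeFromCommandLine(
          /*enable_features=*/"FeatureA,FeatureB",
          /*disable_features=*/"FeatureA");
      // FeatureA -> OVERRIDE_DISABLE_FEATURE (disabled list registered first);
      // FeatureB -> OVERRIDE_ENABLE_FEATURE.
    }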
-
-void FeatureList::InitializeFromSharedMemory(
-    PersistentMemoryAllocator* allocator) {
-  DCHECK(!initialized_);
-
-  PersistentMemoryAllocator::Iterator iter(allocator);
-  const FeatureEntry* entry;
-  while ((entry = iter.GetNextOfObject<FeatureEntry>()) != nullptr) {
-    OverrideState override_state =
-        static_cast<OverrideState>(entry->override_state);
-
-    StringPiece feature_name;
-    StringPiece trial_name;
-    if (!entry->GetFeatureAndTrialName(&feature_name, &trial_name))
-      continue;
-
-    FieldTrial* trial = FieldTrialList::Find(trial_name.as_string());
-    RegisterOverride(feature_name, override_state, trial);
-  }
-}
-
-bool FeatureList::IsFeatureOverriddenFromCommandLine(
-    const std::string& feature_name,
-    OverrideState state) const {
-  auto it = overrides_.find(feature_name);
-  return it != overrides_.end() && it->second.overridden_state == state &&
-         !it->second.overridden_by_field_trial;
-}
-
-void FeatureList::AssociateReportingFieldTrial(
-    const std::string& feature_name,
-    OverrideState for_overridden_state,
-    FieldTrial* field_trial) {
-  DCHECK(
-      IsFeatureOverriddenFromCommandLine(feature_name, for_overridden_state));
-
-  // Only one associated field trial is supported per feature. This is generally
-  // enforced server-side.
-  OverrideEntry* entry = &overrides_.find(feature_name)->second;
-  if (entry->field_trial) {
-    NOTREACHED() << "Feature " << feature_name
-                 << " already has trial: " << entry->field_trial->trial_name()
-                 << ", associating trial: " << field_trial->trial_name();
-    return;
-  }
-
-  entry->field_trial = field_trial;
-}
-
-void FeatureList::RegisterFieldTrialOverride(const std::string& feature_name,
-                                             OverrideState override_state,
-                                             FieldTrial* field_trial) {
-  DCHECK(field_trial);
-  DCHECK(!ContainsKey(overrides_, feature_name) ||
-         !overrides_.find(feature_name)->second.field_trial)
-      << "Feature " << feature_name
-      << " has conflicting field trial overrides: "
-      << overrides_.find(feature_name)->second.field_trial->trial_name()
-      << " / " << field_trial->trial_name();
-
-  RegisterOverride(feature_name, override_state, field_trial);
-}
-
-void FeatureList::AddFeaturesToAllocator(PersistentMemoryAllocator* allocator) {
-  DCHECK(initialized_);
-
-  for (const auto& override : overrides_) {
-    Pickle pickle;
-    pickle.WriteString(override.first);
-    if (override.second.field_trial)
-      pickle.WriteString(override.second.field_trial->trial_name());
-
-    size_t total_size = sizeof(FeatureEntry) + pickle.size();
-    FeatureEntry* entry = allocator->New<FeatureEntry>(total_size);
-    if (!entry)
-      return;
-
-    entry->override_state = override.second.overridden_state;
-    entry->pickle_size = pickle.size();
-
-    char* dst = reinterpret_cast<char*>(entry) + sizeof(FeatureEntry);
-    memcpy(dst, pickle.data(), pickle.size());
-
-    allocator->MakeIterable(entry);
-  }
-}
-
-void FeatureList::GetFeatureOverrides(std::string* enable_overrides,
-                                      std::string* disable_overrides) {
-  GetFeatureOverridesImpl(enable_overrides, disable_overrides, false);
-}
-
-void FeatureList::GetCommandLineFeatureOverrides(
-    std::string* enable_overrides,
-    std::string* disable_overrides) {
-  GetFeatureOverridesImpl(enable_overrides, disable_overrides, true);
-}
-
-// static
-bool FeatureList::IsEnabled(const Feature& feature) {
-  if (!g_feature_list_instance) {
-    g_initialized_from_accessor = true;
-    return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
-  }
-  return g_feature_list_instance->IsFeatureEnabled(feature);
-}
-
-// static
-FieldTrial* FeatureList::GetFieldTrial(const Feature& feature) {
-  if (!g_feature_list_instance) {
-    g_initialized_from_accessor = true;
-    return nullptr;
-  }
-  return g_feature_list_instance->GetAssociatedFieldTrial(feature);
-}
-
-// static
-std::vector<base::StringPiece> FeatureList::SplitFeatureListString(
-    base::StringPiece input) {
-  return SplitStringPiece(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
-}
-
-// static
-bool FeatureList::InitializeInstance(const std::string& enable_features,
-                                     const std::string& disable_features) {
-  // We want to initialize a new instance here to better support command-line
-  // features in testing. For example, we initialize a dummy instance in
-  // base/test/test_suite.cc, and override it in content/browser/
-  // browser_main_loop.cc.
-  // On the other hand, we want to avoid re-initialization from command line.
-  // For example, we initialize an instance in chrome/browser/
-  // chrome_browser_main.cc and do not override it in content/browser/
-  // browser_main_loop.cc.
-  // If the singleton was previously initialized from within an accessor, we
-  // want to prevent callers from reinitializing the singleton and masking the
-  // accessor call(s) which likely returned incorrect information.
-  CHECK(!g_initialized_from_accessor);
-  bool instance_existed_before = false;
-  if (g_feature_list_instance) {
-    if (g_feature_list_instance->initialized_from_command_line_)
-      return false;
-
-    delete g_feature_list_instance;
-    g_feature_list_instance = nullptr;
-    instance_existed_before = true;
-  }
-
-  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
-  feature_list->InitializeFromCommandLine(enable_features, disable_features);
-  base::FeatureList::SetInstance(std::move(feature_list));
-  return !instance_existed_before;
-}
-
-// static
-FeatureList* FeatureList::GetInstance() {
-  return g_feature_list_instance;
-}
-
-// static
-void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
-  DCHECK(!g_feature_list_instance);
-  instance->FinalizeInitialization();
-
-  // Note: Intentional leak of global singleton.
-  g_feature_list_instance = instance.release();
-
-#if DCHECK_IS_CONFIGURABLE
-  // Update the behaviour of LOG_DCHECK to match the Feature configuration.
-  // DCHECK is also forced to be FATAL if we are running a death-test.
-  // TODO(asvitkine): If we find other use-cases that need integrating here
-  // then define a proper API/hook for the purpose.
-  if (base::FeatureList::IsEnabled(kDCheckIsFatalFeature) ||
-      base::CommandLine::ForCurrentProcess()->HasSwitch(
-          "gtest_internal_run_death_test")) {
-    logging::LOG_DCHECK = logging::LOG_FATAL;
-  } else {
-    logging::LOG_DCHECK = logging::LOG_INFO;
-  }
-#endif  // DCHECK_IS_CONFIGURABLE
-}
-
-// static
-std::unique_ptr<FeatureList> FeatureList::ClearInstanceForTesting() {
-  FeatureList* old_instance = g_feature_list_instance;
-  g_feature_list_instance = nullptr;
-  g_initialized_from_accessor = false;
-  return base::WrapUnique(old_instance);
-}
-
-// static
-void FeatureList::RestoreInstanceForTesting(
-    std::unique_ptr<FeatureList> instance) {
-  DCHECK(!g_feature_list_instance);
-  // Note: Intentional leak of global singleton.
-  g_feature_list_instance = instance.release();
-}
-
-void FeatureList::FinalizeInitialization() {
-  DCHECK(!initialized_);
-  initialized_ = true;
-}
-
-bool FeatureList::IsFeatureEnabled(const Feature& feature) {
-  DCHECK(initialized_);
-  DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
-  DCHECK(CheckFeatureIdentity(feature)) << feature.name;
-
-  auto it = overrides_.find(feature.name);
-  if (it != overrides_.end()) {
-    const OverrideEntry& entry = it->second;
-
-    // Activate the corresponding field trial, if necessary.
-    if (entry.field_trial)
-      entry.field_trial->group();
-
-    // TODO(asvitkine): Expand this section as more support is added.
-
-    // If marked as OVERRIDE_USE_DEFAULT, simply return the default state below.
-    if (entry.overridden_state != OVERRIDE_USE_DEFAULT)
-      return entry.overridden_state == OVERRIDE_ENABLE_FEATURE;
-  }
-  // Otherwise, return the default state.
-  return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
-}
-
-FieldTrial* FeatureList::GetAssociatedFieldTrial(const Feature& feature) {
-  DCHECK(initialized_);
-  DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
-  DCHECK(CheckFeatureIdentity(feature)) << feature.name;
-
-  auto it = overrides_.find(feature.name);
-  if (it != overrides_.end()) {
-    const OverrideEntry& entry = it->second;
-    return entry.field_trial;
-  }
-
-  return nullptr;
-}
-
-void FeatureList::RegisterOverridesFromCommandLine(
-    const std::string& feature_list,
-    OverrideState overridden_state) {
-  for (const auto& value : SplitFeatureListString(feature_list)) {
-    StringPiece feature_name = value;
-    base::FieldTrial* trial = nullptr;
-
-    // The entry may be of the form FeatureName<FieldTrialName - in which case,
-    // this splits off the field trial name and associates it with the override.
-    std::string::size_type pos = feature_name.find('<');
-    if (pos != std::string::npos) {
-      feature_name.set(value.data(), pos);
-      trial = base::FieldTrialList::Find(value.substr(pos + 1).as_string());
-    }
-
-    RegisterOverride(feature_name, overridden_state, trial);
-  }
-}
-
-void FeatureList::RegisterOverride(StringPiece feature_name,
-                                   OverrideState overridden_state,
-                                   FieldTrial* field_trial) {
-  DCHECK(!initialized_);
-  if (field_trial) {
-    DCHECK(IsValidFeatureOrFieldTrialName(field_trial->trial_name()))
-        << field_trial->trial_name();
-  }
-  if (feature_name.starts_with("*")) {
-    feature_name = feature_name.substr(1);
-    overridden_state = OVERRIDE_USE_DEFAULT;
-  }
-
-  // Note: The semantics of insert() is that it does not overwrite the entry if
-  // one already exists for the key. Thus, only the first override for a given
-  // feature name takes effect.
-  overrides_.insert(std::make_pair(
-      feature_name.as_string(), OverrideEntry(overridden_state, field_trial)));
-}
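A hedged example combining the two special characters handled above: '<' (split off in RegisterOverridesFromCommandLine()) associates a field trial with the override, and a leading '*' downgrades the override to OVERRIDE_USE_DEFAULT. The feature and trial names are placeholders.

    void InitWithTrialSyntax() {
      base::FeatureList feature_list;
      feature_list.InitializeFromCommandLine(
          /*enable_features=*/"*FeatureA<TrialA,FeatureB",
          /*disable_features=*/"");
      // FeatureA: OVERRIDE_USE_DEFAULT, tied to TrialA if that trial exists;
      // FeatureB: OVERRIDE_ENABLE_FEATURE, no trial.
    }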
-
-void FeatureList::GetFeatureOverridesImpl(std::string* enable_overrides,
-                                          std::string* disable_overrides,
-                                          bool command_line_only) {
-  DCHECK(initialized_);
-
-  enable_overrides->clear();
-  disable_overrides->clear();
-
-  // Note: Since |overrides_| is a std::map, iteration will be in alphabetical
-  // order. This ordering is not guaranteed to users of this function, but it
-  // is convenient for tests to be able to assume it.
-  for (const auto& entry : overrides_) {
-    if (command_line_only &&
-        (entry.second.field_trial != nullptr ||
-         entry.second.overridden_state == OVERRIDE_USE_DEFAULT)) {
-      continue;
-    }
-
-    std::string* target_list = nullptr;
-    switch (entry.second.overridden_state) {
-      case OVERRIDE_USE_DEFAULT:
-      case OVERRIDE_ENABLE_FEATURE:
-        target_list = enable_overrides;
-        break;
-      case OVERRIDE_DISABLE_FEATURE:
-        target_list = disable_overrides;
-        break;
-    }
-
-    if (!target_list->empty())
-      target_list->push_back(',');
-    if (entry.second.overridden_state == OVERRIDE_USE_DEFAULT)
-      target_list->push_back('*');
-    target_list->append(entry.first);
-    if (entry.second.field_trial) {
-      target_list->push_back('<');
-      target_list->append(entry.second.field_trial->trial_name());
-    }
-  }
-}
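A hedged illustration of the round-trip: the strings built above use the same grammar that InitializeFromCommandLine() parses, so they can be forwarded to a child process verbatim. Assumes SetInstance() has already run; the example values are placeholders.

    void ForwardToChild() {
      std::string enabled, disabled;
      base::FeatureList::GetInstance()->GetFeatureOverrides(&enabled,
                                                            &disabled);
      // e.g. enabled  == "*FeatureA<TrialA,FeatureB"
      //      disabled == "FeatureC"
      // Pass these to InitializeFromCommandLine() in the child process.
    }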
-
-bool FeatureList::CheckFeatureIdentity(const Feature& feature) {
-  AutoLock auto_lock(feature_identity_tracker_lock_);
-
-  auto it = feature_identity_tracker_.find(feature.name);
-  if (it == feature_identity_tracker_.end()) {
-    // If it's not tracked yet, register it.
-    feature_identity_tracker_[feature.name] = &feature;
-    return true;
-  }
-  // Compare address of |feature| to the existing tracked entry.
-  return it->second == &feature;
-}
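A hedged sketch of the duplicate-definition bug CheckFeatureIdentity() catches: two Feature structs sharing a name have distinct addresses, so the second query fails the identity check in DCHECK-enabled builds.

    const base::Feature kFooA{"Foo", base::FEATURE_ENABLED_BY_DEFAULT};
    const base::Feature kFooB{"Foo", base::FEATURE_ENABLED_BY_DEFAULT};

    void Query() {
      base::FeatureList::IsEnabled(kFooA);  // registers &kFooA for "Foo"
      base::FeatureList::IsEnabled(kFooB);  // DCHECKs: &kFooB != &kFooA
    }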
-
-FeatureList::OverrideEntry::OverrideEntry(OverrideState overridden_state,
-                                          FieldTrial* field_trial)
-    : overridden_state(overridden_state),
-      field_trial(field_trial),
-      overridden_by_field_trial(field_trial != nullptr) {}
-
-}  // namespace base
diff --git a/base/feature_list.h b/base/feature_list.h
deleted file mode 100644
index 2237507..0000000
--- a/base/feature_list.h
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_FEATURE_LIST_H_
-#define BASE_FEATURE_LIST_H_
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/metrics/persistent_memory_allocator.h"
-#include "base/strings/string_piece.h"
-#include "base/synchronization/lock.h"
-
-namespace base {
-
-class FieldTrial;
-
-// Specifies whether a given feature is enabled or disabled by default.
-enum FeatureState {
-  FEATURE_DISABLED_BY_DEFAULT,
-  FEATURE_ENABLED_BY_DEFAULT,
-};
-
-// The Feature struct is used to define the default state for a feature. See
-// comment below for more details. There must only ever be one struct instance
-// for a given feature name - generally defined as a constant global variable or
-// file static. It should never be used as a constexpr as it breaks
-// pointer-based identity lookup.
-struct BASE_EXPORT Feature {
-  // The name of the feature. This should be unique to each feature and is used
-  // for enabling/disabling features via command line flags and experiments.
-  // It is strongly recommended to use CamelCase style for feature names, e.g.
-  // "MyGreatFeature".
-  const char* const name;
-
-  // The default state (i.e. enabled or disabled) for this feature.
-  const FeatureState default_state;
-};
-
-#if DCHECK_IS_CONFIGURABLE
-// DCHECKs have been built-in, and are configurable at run-time to be fatal, or
-// not, via a DcheckIsFatal feature. We define the Feature here since it is
-// checked in FeatureList::SetInstance(). See https://crbug.com/596231.
-extern BASE_EXPORT const Feature kDCheckIsFatalFeature;
-#endif  // DCHECK_IS_CONFIGURABLE
-
-// The FeatureList class is used to determine whether a given feature is on or
-// off. It provides an authoritative answer, taking into account command-line
-// overrides and experimental control.
-//
-// The basic use case is for any feature that can be toggled (e.g. through
-// command-line or an experiment) to have a defined Feature struct, e.g.:
-//
-//   const base::Feature kMyGreatFeature {
-//     "MyGreatFeature", base::FEATURE_ENABLED_BY_DEFAULT
-//   };
-//
-// Then, client code that wishes to query the state of the feature would check:
-//
-//   if (base::FeatureList::IsEnabled(kMyGreatFeature)) {
-//     // Feature code goes here.
-//   }
-//
-// Behind the scenes, the above call would take into account any command-line
-// flags to enable or disable the feature, any experiments that may control it
-// and finally its default state (in that order of priority), to determine
-// whether the feature is on.
-//
-// Features can be explicitly forced on or off by specifying a list of comma-
-// separated feature names via the following command-line flags:
-//
-//   --enable-features=Feature5,Feature7
-//   --disable-features=Feature1,Feature2,Feature3
-//
-// To enable/disable features in a test, do NOT append --enable-features or
-// --disable-features to the command-line directly. Instead, use
-// ScopedFeatureList. See base/test/scoped_feature_list.h for details.
-//
-// After initialization (which should be done single-threaded), the FeatureList
-// API is thread safe.
-//
-// Note: This class is a singleton, but does not use base/memory/singleton.h in
-// order to have control over its initialization sequence. Specifically, the
-// intended use is to create an instance of this class and fully initialize it,
-// before setting it as the singleton for a process, via SetInstance().
-class BASE_EXPORT FeatureList {
- public:
-  FeatureList();
-  ~FeatureList();
-
-  // Initializes feature overrides via command-line flags |enable_features| and
-  // |disable_features|, each of which is a comma-separated list of features to
-  // enable or disable, respectively. If a feature appears on both lists, then
-  // it will be disabled. If a list entry has the format "FeatureName<TrialName"
-  // then this initialization will also associate the feature state override
-  // with the named field trial, if it exists. If a feature name is prefixed
-  // with the '*' character, it will be created with OVERRIDE_USE_DEFAULT -
-  // which is useful for associating with a trial while using the default state.
-  // Must only be invoked during the initialization phase (before
-  // FinalizeInitialization() has been called).
-  void InitializeFromCommandLine(const std::string& enable_features,
-                                 const std::string& disable_features);
-
-  // Initializes feature overrides through the field trial allocator, which
-  // we're using to store the feature names, their override state, and the name
-  // of the associated field trial.
-  void InitializeFromSharedMemory(PersistentMemoryAllocator* allocator);
-
-  // Specifies whether a feature override enables or disables the feature.
-  enum OverrideState {
-    OVERRIDE_USE_DEFAULT,
-    OVERRIDE_DISABLE_FEATURE,
-    OVERRIDE_ENABLE_FEATURE,
-  };
-
-  // Returns true if the state of |feature_name| has been overridden via
-  // |InitializeFromCommandLine()|.
-  bool IsFeatureOverriddenFromCommandLine(const std::string& feature_name,
-                                          OverrideState state) const;
-
-  // Associates a field trial for reporting purposes corresponding to the
-  // command-line setting the feature state to |for_overridden_state|. The trial
-  // will be activated when the state of the feature is first queried. This
-  // should be called during registration, after InitializeFromCommandLine() has
-  // been called but before the instance is registered via SetInstance().
-  void AssociateReportingFieldTrial(const std::string& feature_name,
-                                    OverrideState for_overridden_state,
-                                    FieldTrial* field_trial);
-
-  // Registers a field trial to override the enabled state of the specified
-  // feature to |override_state|. Command-line overrides still take precedence
-  // over field trials, so this will have no effect if the feature is being
-  // overridden from the command-line. The associated field trial will be
-  // activated when the feature state for this feature is queried. This should
-  // be called during registration, after InitializeFromCommandLine() has been
-  // called but before the instance is registered via SetInstance().
-  void RegisterFieldTrialOverride(const std::string& feature_name,
-                                  OverrideState override_state,
-                                  FieldTrial* field_trial);
-
-  // Loops through feature overrides and serializes them all into |allocator|.
-  void AddFeaturesToAllocator(PersistentMemoryAllocator* allocator);
-
-  // Returns comma-separated lists of feature names (in the same format that is
-  // accepted by InitializeFromCommandLine()) corresponding to features that
-  // have been overridden - either through command-line or via FieldTrials. For
-  // those features that have an associated FieldTrial, the output entry will be
-  // of the format "FeatureName<TrialName", where "TrialName" is the name of the
-  // FieldTrial. Features that have overrides with OVERRIDE_USE_DEFAULT will be
-  // added to |enable_overrides| with a '*' character prefix. Must be called
-  // only after the instance has been initialized and registered.
-  void GetFeatureOverrides(std::string* enable_overrides,
-                           std::string* disable_overrides);
-
-  // Like GetFeatureOverrides(), but only returns overrides that were specified
-  // explicitly on the command-line, omitting the ones from field trials.
-  void GetCommandLineFeatureOverrides(std::string* enable_overrides,
-                                      std::string* disable_overrides);
-
-  // Returns whether the given |feature| is enabled. Must only be called after
-  // the singleton instance has been registered via SetInstance(). Additionally,
-  // a feature with a given name must only have a single corresponding Feature
-  // struct, which is checked in builds with DCHECKs enabled.
-  static bool IsEnabled(const Feature& feature);
-
-  // Returns the field trial associated with the given |feature|. Must only be
-  // called after the singleton instance has been registered via SetInstance().
-  static FieldTrial* GetFieldTrial(const Feature& feature);
-
-  // Splits a comma-separated string containing feature names into a vector. The
-  // resulting pieces point to parts of |input|.
-  static std::vector<base::StringPiece> SplitFeatureListString(
-      base::StringPiece input);
-
-  // Initializes and sets an instance of FeatureList with feature overrides via
-  // command-line flags |enable_features| and |disable_features| if one has not
-  // already been set from command-line flags. Returns true if an instance did
-  // not previously exist. See InitializeFromCommandLine() for more details
-  // about |enable_features| and |disable_features| parameters.
-  static bool InitializeInstance(const std::string& enable_features,
-                                 const std::string& disable_features);
-
-  // Returns the singleton instance of FeatureList. Will return null until an
-  // instance is registered via SetInstance().
-  static FeatureList* GetInstance();
-
-  // Registers the given |instance| to be the singleton feature list for this
-  // process. This should only be called once and |instance| must not be null.
-  // Note: If you are considering using this for the purposes of testing, take
-  // a look at using base/test/scoped_feature_list.h instead.
-  static void SetInstance(std::unique_ptr<FeatureList> instance);
-
-  // Clears the previously-registered singleton instance for tests and returns
-  // the old instance.
-  // Note: Most tests should never call this directly. Instead consider using
-  // base::test::ScopedFeatureList.
-  static std::unique_ptr<FeatureList> ClearInstanceForTesting();
-
-  // Sets a given (initialized) |instance| to be the singleton feature list,
-  // for testing. Existing instance must be null. This is primarily intended
-  // to support base::test::ScopedFeatureList helper class.
-  static void RestoreInstanceForTesting(std::unique_ptr<FeatureList> instance);
-
- private:
-  FRIEND_TEST_ALL_PREFIXES(FeatureListTest, CheckFeatureIdentity);
-  FRIEND_TEST_ALL_PREFIXES(FeatureListTest,
-                           StoreAndRetrieveFeaturesFromSharedMemory);
-  FRIEND_TEST_ALL_PREFIXES(FeatureListTest,
-                           StoreAndRetrieveAssociatedFeaturesFromSharedMemory);
-
-  struct OverrideEntry {
-    // The overridden enable (on/off) state of the feature.
-    const OverrideState overridden_state;
-
-    // An optional associated field trial, which will be activated when the
-    // state of the feature is queried for the first time. Weak pointer to the
-    // FieldTrial object that is owned by the FieldTrialList singleton.
-    base::FieldTrial* field_trial;
-
-    // Specifies whether the feature's state is overridden by |field_trial|.
-    // If it's not, and |field_trial| is not null, it means it is simply an
-    // associated field trial for reporting purposes (and |overridden_state|
-    // came from the command-line).
-    const bool overridden_by_field_trial;
-
-    // TODO(asvitkine): Expand this as more support is added.
-
-    // Constructs an OverrideEntry for the given |overridden_state|. If
-    // |field_trial| is not null, it implies that |overridden_state| comes from
-    // the trial, so |overridden_by_field_trial| will be set to true.
-    OverrideEntry(OverrideState overridden_state, FieldTrial* field_trial);
-  };
-
-  // Finalizes the initialization state of the FeatureList, so that no further
-  // overrides can be registered. This is called by SetInstance() on the
-  // singleton feature list that is being registered.
-  void FinalizeInitialization();
-
-  // Returns whether the given |feature| is enabled. This is invoked by the
-  // public FeatureList::IsEnabled() static function on the global singleton.
-  // Requires the FeatureList to have already been fully initialized.
-  bool IsFeatureEnabled(const Feature& feature);
-
-  // Returns the field trial associated with the given |feature|. This is
-  // invoked by the public FeatureList::GetFieldTrial() static function on the
-  // global singleton. Requires the FeatureList to have already been fully
-  // initialized.
-  base::FieldTrial* GetAssociatedFieldTrial(const Feature& feature);
-
-  // For each feature name in the comma-separated list of strings
-  // |feature_list|, registers an override with the specified
-  // |overridden_state|. Also associates an optional named field trial if the
-  // entry is of the format "FeatureName<TrialName".
-  void RegisterOverridesFromCommandLine(const std::string& feature_list,
-                                        OverrideState overridden_state);
-
-  // Registers an override for feature |feature_name|. The override specifies
-  // whether the feature should be on or off (via |overridden_state|), which
-  // will take precedence over the feature's default state. If |field_trial| is
-  // not null, registers the specified field trial object to be associated with
-  // the feature, which will activate the field trial when the feature state is
-  // queried. If an override is already registered for the given feature, it
-  // will not be changed.
-  void RegisterOverride(StringPiece feature_name,
-                        OverrideState overridden_state,
-                        FieldTrial* field_trial);
-
-  // Implementation of GetFeatureOverrides() with a parameter that specifies
-  // whether only command-line enabled overrides should be emitted. See that
-  // function's comments for more details.
-  void GetFeatureOverridesImpl(std::string* enable_overrides,
-                               std::string* disable_overrides,
-                               bool command_line_only);
-
-  // Verifies that there's only a single definition of a Feature struct for a
-  // given feature name. Keeps track of the first seen Feature struct for each
-  // feature. Returns false when called on a Feature struct with a different
-  // address than the first one it saw for that feature name. Used only from
-  // DCHECKs and tests.
-  bool CheckFeatureIdentity(const Feature& feature);
-
-  // Map from feature name to an OverrideEntry struct for the feature, if it
-  // exists.
-  std::map<std::string, OverrideEntry> overrides_;
-
-  // Locked map that keeps track of seen features, to ensure a single feature is
-  // only defined once. This verification is only done in builds with DCHECKs
-  // enabled.
-  Lock feature_identity_tracker_lock_;
-  std::map<std::string, const Feature*> feature_identity_tracker_;
-
-  // Whether this object has been fully initialized. This gets set to true as a
-  // result of FinalizeInitialization().
-  bool initialized_ = false;
-
-  // Whether this object has been initialized from command line.
-  bool initialized_from_command_line_ = false;
-
-  DISALLOW_COPY_AND_ASSIGN(FeatureList);
-};
-
-}  // namespace base
-
-#endif  // BASE_FEATURE_LIST_H_
diff --git a/base/files/file.cc b/base/files/file.cc
index 92ff6dd..b79802b 100644
--- a/base/files/file.cc
+++ b/base/files/file.cc
@@ -5,7 +5,6 @@
 #include "base/files/file.h"
 #include "base/files/file_path.h"
 #include "base/files/file_tracing.h"
-#include "base/metrics/histogram.h"
 #include "base/timer/elapsed_timer.h"
 #include "build_config.h"
 
diff --git a/base/files/file_posix.cc b/base/files/file_posix.cc
index d6b7641..9f6ae16 100644
--- a/base/files/file_posix.cc
+++ b/base/files/file_posix.cc
@@ -11,7 +11,6 @@
 #include <unistd.h>
 
 #include "base/logging.h"
-#include "base/metrics/histogram_functions.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/threading/thread_restrictions.h"
@@ -421,9 +420,6 @@
     case ENOTDIR:
       return FILE_ERROR_NOT_A_DIRECTORY;
     default:
-#if !defined(OS_NACL)  // NaCl build has no metrics code.
-      UmaHistogramSparse("PlatformFile.UnknownErrors.Posix", saved_errno);
-#endif
       // This function should only be called for errors.
       DCHECK_NE(0, saved_errno);
       return FILE_ERROR_FAILED;
diff --git a/base/files/important_file_writer.cc b/base/files/important_file_writer.cc
deleted file mode 100644
index 7fd9d79..0000000
--- a/base/files/important_file_writer.cc
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/files/important_file_writer.h"
-
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <string>
-#include <utility>
-
-#include "base/bind.h"
-#include "base/callback_helpers.h"
-#include "base/critical_closure.h"
-#include "base/debug/alias.h"
-#include "base/files/file.h"
-#include "base/files/file_path.h"
-#include "base/files/file_util.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/metrics/histogram_functions.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_util.h"
-#include "base/task_runner.h"
-#include "base/task_runner_util.h"
-#include "base/threading/thread.h"
-#include "base/time/time.h"
-#include "build_config.h"
-
-namespace base {
-
-namespace {
-
-constexpr auto kDefaultCommitInterval = TimeDelta::FromSeconds(10);
-
-// This enum is used to define the buckets for an enumerated UMA histogram.
-// Hence,
-//   (a) existing enumerated constants should never be deleted or reordered, and
-//   (b) new constants should only be appended at the end of the enumeration.
-enum TempFileFailure {
-  FAILED_CREATING,
-  FAILED_OPENING,
-  FAILED_CLOSING,  // Unused.
-  FAILED_WRITING,
-  FAILED_RENAMING,
-  FAILED_FLUSHING,
-  TEMP_FILE_FAILURE_MAX
-};
-
-// Helper function to write samples to a histogram with a dynamically assigned
-// histogram name.  Works with different error code types convertible to int
-// which is the actual argument type of UmaHistogramExactLinear.
-template <typename SampleType>
-void UmaHistogramExactLinearWithSuffix(const char* histogram_name,
-                                       StringPiece histogram_suffix,
-                                       SampleType add_sample,
-                                       SampleType max_sample) {
-  static_assert(std::is_convertible<SampleType, int>::value,
-                "SampleType should be convertible to int");
-  DCHECK(histogram_name);
-  std::string histogram_full_name(histogram_name);
-  if (!histogram_suffix.empty()) {
-    histogram_full_name.append(".");
-    histogram_full_name.append(histogram_suffix.data(),
-                               histogram_suffix.length());
-  }
-  UmaHistogramExactLinear(histogram_full_name, static_cast<int>(add_sample),
-                          static_cast<int>(max_sample));
-}
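A hedged example of the helper above, matching its use later in this file: the suffix is dot-appended so each client gets its own histogram. The "Prefs" suffix is a placeholder.

    void RecordCreateError() {
      // Lands in "ImportantFile.FileCreateError.Prefs". File::Error values are
      // negative, so they are negated to fit the linear range, as done in
      // WriteFileAtomically() below.
      UmaHistogramExactLinearWithSuffix("ImportantFile.FileCreateError",
                                        "Prefs",
                                        -base::File::GetLastFileError(),
                                        -base::File::FILE_ERROR_MAX);
    }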
-
-// Helper function to write samples to a histogram with a dynamically assigned
-// histogram name. Works with short timings from 1 ms up to 10 seconds (50
-// buckets), which is the range handled by UmaHistogramTimes.
-void UmaHistogramTimesWithSuffix(const char* histogram_name,
-                                 StringPiece histogram_suffix,
-                                 TimeDelta sample) {
-  DCHECK(histogram_name);
-  std::string histogram_full_name(histogram_name);
-  if (!histogram_suffix.empty()) {
-    histogram_full_name.append(".");
-    histogram_full_name.append(histogram_suffix.data(),
-                               histogram_suffix.length());
-  }
-  UmaHistogramTimes(histogram_full_name, sample);
-}
-
-void LogFailure(const FilePath& path,
-                StringPiece histogram_suffix,
-                TempFileFailure failure_code,
-                StringPiece message) {
-  UmaHistogramExactLinearWithSuffix("ImportantFile.TempFileFailures",
-                                    histogram_suffix, failure_code,
-                                    TEMP_FILE_FAILURE_MAX);
-  DPLOG(WARNING) << "temp file failure: " << path.value() << " : " << message;
-}
-
-// Helper function to call WriteFileAtomically() with a
-// std::unique_ptr<std::string>.
-void WriteScopedStringToFileAtomically(
-    const FilePath& path,
-    std::unique_ptr<std::string> data,
-    Closure before_write_callback,
-    Callback<void(bool success)> after_write_callback,
-    const std::string& histogram_suffix) {
-  if (!before_write_callback.is_null())
-    before_write_callback.Run();
-
-  TimeTicks start_time = TimeTicks::Now();
-  bool result =
-      ImportantFileWriter::WriteFileAtomically(path, *data, histogram_suffix);
-  if (result) {
-    UmaHistogramTimesWithSuffix("ImportantFile.TimeToWrite", histogram_suffix,
-                                TimeTicks::Now() - start_time);
-  }
-
-  if (!after_write_callback.is_null())
-    after_write_callback.Run(result);
-}
-
-void DeleteTmpFile(const FilePath& tmp_file_path,
-                   StringPiece histogram_suffix) {
-  if (!DeleteFile(tmp_file_path, false)) {
-    UmaHistogramExactLinearWithSuffix(
-        "ImportantFile.FileDeleteError", histogram_suffix,
-        -base::File::GetLastFileError(), -base::File::FILE_ERROR_MAX);
-  }
-}
-
-}  // namespace
-
-// static
-bool ImportantFileWriter::WriteFileAtomically(const FilePath& path,
-                                              StringPiece data,
-                                              StringPiece histogram_suffix) {
-#if defined(OS_CHROMEOS)
-  // On Chrome OS, chrome gets killed when it cannot finish shutdown quickly,
-  // and this function seems to be one of the slowest shutdown steps.
-  // Include some info in the report for investigation. crbug.com/418627
-  // TODO(hashimoto): Remove this.
-  struct {
-    size_t data_size;
-    char path[128];
-  } file_info;
-  file_info.data_size = data.size();
-  strlcpy(file_info.path, path.value().c_str(), arraysize(file_info.path));
-  debug::Alias(&file_info);
-#endif
-
-  // Write the data to a temp file then rename to avoid data loss if we crash
-  // while writing the file. Ensure that the temp file is on the same volume
-  // as target file, so it can be moved in one step, and that the temp file
-  // is securely created.
-  FilePath tmp_file_path;
-  if (!CreateTemporaryFileInDir(path.DirName(), &tmp_file_path)) {
-    UmaHistogramExactLinearWithSuffix(
-        "ImportantFile.FileCreateError", histogram_suffix,
-        -base::File::GetLastFileError(), -base::File::FILE_ERROR_MAX);
-    LogFailure(path, histogram_suffix, FAILED_CREATING,
-               "could not create temporary file");
-    return false;
-  }
-
-  File tmp_file(tmp_file_path, File::FLAG_OPEN | File::FLAG_WRITE);
-  if (!tmp_file.IsValid()) {
-    UmaHistogramExactLinearWithSuffix(
-        "ImportantFile.FileOpenError", histogram_suffix,
-        -tmp_file.error_details(), -base::File::FILE_ERROR_MAX);
-    LogFailure(path, histogram_suffix, FAILED_OPENING,
-               "could not open temporary file");
-    DeleteFile(tmp_file_path, false);
-    return false;
-  }
-
-  // If this fails in the wild, something really bad is going on.
-  const int data_length = checked_cast<int32_t>(data.length());
-  int bytes_written = tmp_file.Write(0, data.data(), data_length);
-  if (bytes_written < data_length) {
-    UmaHistogramExactLinearWithSuffix(
-        "ImportantFile.FileWriteError", histogram_suffix,
-        -base::File::GetLastFileError(), -base::File::FILE_ERROR_MAX);
-  }
-  bool flush_success = tmp_file.Flush();
-  tmp_file.Close();
-
-  if (bytes_written < data_length) {
-    LogFailure(path, histogram_suffix, FAILED_WRITING,
-               "error writing, bytes_written=" + IntToString(bytes_written));
-    DeleteTmpFile(tmp_file_path, histogram_suffix);
-    return false;
-  }
-
-  if (!flush_success) {
-    LogFailure(path, histogram_suffix, FAILED_FLUSHING, "error flushing");
-    DeleteTmpFile(tmp_file_path, histogram_suffix);
-    return false;
-  }
-
-  base::File::Error replace_file_error = base::File::FILE_OK;
-  if (!ReplaceFile(tmp_file_path, path, &replace_file_error)) {
-    UmaHistogramExactLinearWithSuffix("ImportantFile.FileRenameError",
-                                      histogram_suffix, -replace_file_error,
-                                      -base::File::FILE_ERROR_MAX);
-    LogFailure(path, histogram_suffix, FAILED_RENAMING,
-               "could not rename temporary file");
-    DeleteTmpFile(tmp_file_path, histogram_suffix);
-    return false;
-  }
-
-  return true;
-}
-
-ImportantFileWriter::ImportantFileWriter(
-    const FilePath& path,
-    scoped_refptr<SequencedTaskRunner> task_runner,
-    const char* histogram_suffix)
-    : ImportantFileWriter(path,
-                          std::move(task_runner),
-                          kDefaultCommitInterval,
-                          histogram_suffix) {}
-
-ImportantFileWriter::ImportantFileWriter(
-    const FilePath& path,
-    scoped_refptr<SequencedTaskRunner> task_runner,
-    TimeDelta interval,
-    const char* histogram_suffix)
-    : path_(path),
-      task_runner_(std::move(task_runner)),
-      serializer_(nullptr),
-      commit_interval_(interval),
-      histogram_suffix_(histogram_suffix ? histogram_suffix : ""),
-      weak_factory_(this) {
-  DCHECK(task_runner_);
-}
-
-ImportantFileWriter::~ImportantFileWriter() {
-  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-  // We're usually a member variable of some other object, which also tends
-  // to be our serializer. It may not be safe to call back to the parent object
-  // being destructed.
-  DCHECK(!HasPendingWrite());
-}
-
-bool ImportantFileWriter::HasPendingWrite() const {
-  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-  return timer().IsRunning();
-}
-
-void ImportantFileWriter::WriteNow(std::unique_ptr<std::string> data) {
-  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-  if (!IsValueInRangeForNumericType<int32_t>(data->length())) {
-    NOTREACHED();
-    return;
-  }
-
-  Closure task = AdaptCallbackForRepeating(
-      BindOnce(&WriteScopedStringToFileAtomically, path_, std::move(data),
-               std::move(before_next_write_callback_),
-               std::move(after_next_write_callback_), histogram_suffix_));
-
-  if (!task_runner_->PostTask(FROM_HERE, MakeCriticalClosure(task))) {
-    // Posting the task to the background message loop is not expected
-    // to fail, but if it does, avoid losing data and just hit the disk
-    // on the current thread.
-    NOTREACHED();
-
-    task.Run();
-  }
-  ClearPendingWrite();
-}
-
-void ImportantFileWriter::ScheduleWrite(DataSerializer* serializer) {
-  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-
-  DCHECK(serializer);
-  serializer_ = serializer;
-
-  if (!timer().IsRunning()) {
-    timer().Start(
-        FROM_HERE, commit_interval_,
-        Bind(&ImportantFileWriter::DoScheduledWrite, Unretained(this)));
-  }
-}
-
-void ImportantFileWriter::DoScheduledWrite() {
-  DCHECK(serializer_);
-  std::unique_ptr<std::string> data(new std::string);
-  if (serializer_->SerializeData(data.get())) {
-    WriteNow(std::move(data));
-  } else {
-    DLOG(WARNING) << "failed to serialize data to be saved in "
-                  << path_.value();
-  }
-  ClearPendingWrite();
-}
-
-void ImportantFileWriter::RegisterOnNextWriteCallbacks(
-    const Closure& before_next_write_callback,
-    const Callback<void(bool success)>& after_next_write_callback) {
-  before_next_write_callback_ = before_next_write_callback;
-  after_next_write_callback_ = after_next_write_callback;
-}
-
-void ImportantFileWriter::ClearPendingWrite() {
-  timer().Stop();
-  serializer_ = nullptr;
-}
-
-void ImportantFileWriter::SetTimerForTesting(Timer* timer_override) {
-  timer_override_ = timer_override;
-}
-
-}  // namespace base
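
For reference, the create/flush/rename sequence implemented by WriteFileAtomically() above can be sketched with plain POSIX calls. This is a minimal sketch under simplified assumptions: no histogram logging, and a fixed ".tmp" suffix rather than the securely created same-volume temp file the real code gets from CreateTemporaryFileInDir().

#include <fcntl.h>   // open
#include <unistd.h>  // write, fsync, close
#include <cstdio>    // rename, remove
#include <string>

// Minimal sketch: write |data| to |path| via a temp file so readers see
// either the old contents or the complete new contents, never a partial
// write.
bool WriteAtomicallySketch(const std::string& path, const std::string& data) {
  const std::string tmp = path + ".tmp";  // Illustrative naming only.
  int fd = open(tmp.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0600);
  if (fd < 0)
    return false;
  ssize_t written = write(fd, data.data(), data.size());
  // Flush to disk before the rename, mirroring the Flush() call above, so a
  // system crash cannot expose a renamed-but-unwritten file.
  bool ok = written == static_cast<ssize_t>(data.size()) && fsync(fd) == 0;
  close(fd);
  if (!ok || rename(tmp.c_str(), path.c_str()) != 0) {
    remove(tmp.c_str());  // Best-effort cleanup of the temp file.
    return false;
  }
  return true;
}

Because the temp file lives in the same directory as the target, rename() is a same-volume move and therefore atomic on POSIX filesystems.
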
diff --git a/base/files/important_file_writer.h b/base/files/important_file_writer.h
deleted file mode 100644
index 08a7ee3..0000000
--- a/base/files/important_file_writer.h
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_FILES_IMPORTANT_FILE_WRITER_H_
-#define BASE_FILES_IMPORTANT_FILE_WRITER_H_
-
-#include <string>
-
-#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/files/file_path.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/sequence_checker.h"
-#include "base/strings/string_piece.h"
-#include "base/time/time.h"
-#include "base/timer/timer.h"
-
-namespace base {
-
-class SequencedTaskRunner;
-
-// Helper for atomically writing a file to ensure that it won't be corrupted by
-// an *application* crash during a write (implemented as create, flush, rename).
-//
-// As an added benefit, ImportantFileWriter makes it less likely that the file
-// is corrupted by a *system* crash, though even if the ImportantFileWriter call
-// has already returned at the time of the crash it is not specified which
-// version of the file (old or new) is preserved. And depending on system
-// configuration (hardware and software) a significant likelihood of file
-// corruption may remain, thus using ImportantFileWriter is not a valid
-// substitute for file integrity checks and recovery codepaths for malformed
-// files.
-//
-// Also note that ImportantFileWriter can be *really* slow (cf. File::Flush()
-// for details) and thus please don't block shutdown on ImportantFileWriter.
-class BASE_EXPORT ImportantFileWriter {
- public:
-  // Used by ScheduleWrite() to lazily provide the data to be saved. Allows us
-  // to also batch data serializations.
-  class BASE_EXPORT DataSerializer {
-   public:
-    // Should put serialized string in |data| and return true on successful
-    // serialization. Will be called on the same thread on which
-    // ImportantFileWriter has been created.
-    virtual bool SerializeData(std::string* data) = 0;
-
-   protected:
-    virtual ~DataSerializer() = default;
-  };
-
-  // Save |data| to |path| in an atomic manner. Blocks and writes data on the
-  // current thread. Does not guarantee file integrity across system crash (see
-  // the class comment above).
-  static bool WriteFileAtomically(const FilePath& path,
-                                  StringPiece data,
-                                  StringPiece histogram_suffix = StringPiece());
-
-  // Initialize the writer.
-  // |path| is the name of file to write.
-  // |task_runner| is the SequencedTaskRunner instance on which we will
-  // execute file I/O operations.
-  // All non-const methods, ctor and dtor must be called on the same thread.
-  ImportantFileWriter(const FilePath& path,
-                      scoped_refptr<SequencedTaskRunner> task_runner,
-                      const char* histogram_suffix = nullptr);
-
-  // Same as above, but with a custom commit interval.
-  ImportantFileWriter(const FilePath& path,
-                      scoped_refptr<SequencedTaskRunner> task_runner,
-                      TimeDelta interval,
-                      const char* histogram_suffix = nullptr);
-
-  // You have to ensure that there are no pending writes at the moment
-  // of destruction.
-  ~ImportantFileWriter();
-
-  const FilePath& path() const { return path_; }
-
-  // Returns true if there is a scheduled write pending which has not yet
-  // been started.
-  bool HasPendingWrite() const;
-
-  // Save |data| to target filename. Does not block. If there is a pending write
-  // scheduled by ScheduleWrite(), it is cancelled.
-  void WriteNow(std::unique_ptr<std::string> data);
-
-  // Schedule a save to target filename. Data will be serialized and saved
-  // to disk after the commit interval. If another ScheduleWrite is issued
-  // before that, only one serialization and write to disk will happen, and
-  // the most recent |serializer| will be used. This operation does not block.
-  // |serializer| should remain valid through the lifetime of
-  // ImportantFileWriter.
-  void ScheduleWrite(DataSerializer* serializer);
-
-  // Serializes pending data and executes the write on the backend thread.
-  void DoScheduledWrite();
-
-  // Registers |before_next_write_callback| and |after_next_write_callback| to
-  // be synchronously invoked from WriteFileAtomically() before its next write
-  // and after its next write, respectively. The boolean passed to
-  // |after_next_write_callback| indicates whether the write was successful.
-  // Both callbacks must be thread safe as they will be called on |task_runner_|
-  // and may be called during Chrome shutdown.
-  // If called more than once before a write is scheduled on |task_runner|, the
-  // latest callbacks clobber the others.
-  void RegisterOnNextWriteCallbacks(
-      const Closure& before_next_write_callback,
-      const Callback<void(bool success)>& after_next_write_callback);
-
-  TimeDelta commit_interval() const {
-    return commit_interval_;
-  }
-
-  // Overrides the timer to use for scheduling writes with |timer_override|.
-  void SetTimerForTesting(Timer* timer_override);
-
- private:
-  const Timer& timer() const {
-    return timer_override_ ? const_cast<const Timer&>(*timer_override_)
-                           : timer_;
-  }
-  Timer& timer() { return timer_override_ ? *timer_override_ : timer_; }
-
-  void ClearPendingWrite();
-
-  // Invoked synchronously on the next write event.
-  Closure before_next_write_callback_;
-  Callback<void(bool success)> after_next_write_callback_;
-
-  // Path being written to.
-  const FilePath path_;
-
-  // TaskRunner for the thread on which file I/O can be done.
-  const scoped_refptr<SequencedTaskRunner> task_runner_;
-
-  // Timer used to schedule commit after ScheduleWrite.
-  OneShotTimer timer_;
-
-  // An override for |timer_| used for testing.
-  Timer* timer_override_ = nullptr;
-
-  // Serializer which will provide the data to be saved.
-  DataSerializer* serializer_;
-
-  // Time delta after which scheduled data will be written to disk.
-  const TimeDelta commit_interval_;
-
-  // Custom histogram suffix.
-  const std::string histogram_suffix_;
-
-  SEQUENCE_CHECKER(sequence_checker_);
-
-  WeakPtrFactory<ImportantFileWriter> weak_factory_;
-
-  DISALLOW_COPY_AND_ASSIGN(ImportantFileWriter);
-};
-
-}  // namespace base
-
-#endif  // BASE_FILES_IMPORTANT_FILE_WRITER_H_
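
A hypothetical usage sketch for the DataSerializer interface above: SettingsStore and its key=value line format are invented for illustration, but the calls match the deleted header. Repeated Set() calls within the commit interval coalesce into one serialization and one disk write.

#include <map>
#include <string>
#include <utility>

#include "base/files/important_file_writer.h"

// Invented example class: a small key/value store that lets the writer
// batch bursts of changes.
class SettingsStore : public base::ImportantFileWriter::DataSerializer {
 public:
  SettingsStore(const base::FilePath& path,
                scoped_refptr<base::SequencedTaskRunner> io_runner)
      : writer_(path, std::move(io_runner)) {}

  void Set(const std::string& key, const std::string& value) {
    values_[key] = value;
    // Arms the commit-interval timer; the actual write happens later.
    writer_.ScheduleWrite(this);
  }

  // DataSerializer:
  bool SerializeData(std::string* data) override {
    for (const auto& kv : values_)
      data->append(kv.first + "=" + kv.second + "\n");
    return true;
  }

 private:
  std::map<std::string, std::string> values_;
  base::ImportantFileWriter writer_;
};
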
diff --git a/base/logging.cc b/base/logging.cc
index cf96aeb..72ca87a 100644
--- a/base/logging.cc
+++ b/base/logging.cc
@@ -88,7 +88,6 @@
 #include "base/callback.h"
 #include "base/command_line.h"
 #include "base/containers/stack.h"
-#include "base/debug/activity_tracker.h"
 #include "base/debug/alias.h"
 #include "base/debug/debugger.h"
 #include "base/debug/stack_trace.h"
@@ -816,12 +815,6 @@
   }
 
   if (severity_ == LOG_FATAL) {
-    // Write the log message to the global activity tracker, if running.
-    base::debug::GlobalActivityTracker* tracker =
-        base::debug::GlobalActivityTracker::Get();
-    if (tracker)
-      tracker->RecordLogMessage(str_newline);
-
     // Ensure the first characters of the string are on the stack so they
     // are contained in minidumps for diagnostic purposes.
     DEBUG_ALIAS_FOR_CSTR(str_stack, str_newline.c_str(), 1024);
diff --git a/base/memory/shared_memory_mac.cc b/base/memory/shared_memory_mac.cc
index 3990948..5e9e8fc 100644
--- a/base/memory/shared_memory_mac.cc
+++ b/base/memory/shared_memory_mac.cc
@@ -19,8 +19,6 @@
 #include "base/mac/scoped_mach_vm.h"
 #include "base/memory/shared_memory_helper.h"
 #include "base/memory/shared_memory_tracker.h"
-#include "base/metrics/field_trial.h"
-#include "base/metrics/histogram_macros.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/posix/safe_strerror.h"
 #include "base/process/process_metrics.h"
diff --git a/base/metrics/OWNERS b/base/metrics/OWNERS
deleted file mode 100644
index 4cc69ff..0000000
--- a/base/metrics/OWNERS
+++ /dev/null
@@ -1,10 +0,0 @@
-asvitkine@chromium.org
-bcwhite@chromium.org
-gayane@chromium.org
-holte@chromium.org
-isherman@chromium.org
-jwd@chromium.org
-mpearson@chromium.org
-rkaplow@chromium.org
-
-# COMPONENT: Internals>Metrics
diff --git a/base/metrics/bucket_ranges.cc b/base/metrics/bucket_ranges.cc
deleted file mode 100644
index 39b3793..0000000
--- a/base/metrics/bucket_ranges.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/bucket_ranges.h"
-
-#include <cmath>
-
-#include "base/logging.h"
-
-namespace base {
-
-// Static table of checksums for all possible 8 bit bytes.
-const uint32_t kCrcTable[256] = {
-    0x0,         0x77073096L, 0xee0e612cL, 0x990951baL, 0x76dc419L,
-    0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0xedb8832L,  0x79dcb8a4L,
-    0xe0d5e91eL, 0x97d2d988L, 0x9b64c2bL,  0x7eb17cbdL, 0xe7b82d07L,
-    0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
-    0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
-    0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
-    0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
-    0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
-    0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
-    0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
-    0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
-    0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
-    0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
-    0x1db7106L,  0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x6b6b51fL,
-    0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0xf00f934L,  0x9609a88eL,
-    0xe10e9818L, 0x7f6a0dbbL, 0x86d3d2dL,  0x91646c97L, 0xe6635c01L,
-    0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
-    0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
-    0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
-    0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
-    0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
-    0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
-    0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
-    0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
-    0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
-    0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
-    0x3b6e20cL,  0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x4db2615L,
-    0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0xd6d6a3eL,  0x7a6a5aa8L,
-    0xe40ecf0bL, 0x9309ff9dL, 0xa00ae27L,  0x7d079eb1L, 0xf00f9344L,
-    0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
-    0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
-    0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
-    0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
-    0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
-    0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
-    0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
-    0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
-    0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
-    0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
-    0x26d930aL,  0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x5005713L,
-    0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0xcb61b38L,  0x92d28e9bL,
-    0xe5d5be0dL, 0x7cdcefb7L, 0xbdbdf21L,  0x86d3d2d4L, 0xf1d4e242L,
-    0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
-    0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
-    0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
-    0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
-    0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
-    0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
-    0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
-    0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
-    0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
-    0x2d02ef8dL,
-};
-
-// We generate the CRC-32 using the low order bits to select whether to XOR in
-// the reversed polynomial 0xedb88320L.  This is nice and simple, and allows us
-// to keep the quotient in a uint32_t.  Since we're not concerned about the
-// nature of corruptions (i.e., we don't care about bit sequencing, since we are
-// handling memory changes, which are more grotesque), we don't bother to get
-// the CRC correct for big-endian vs little-endian calculations.  All we need is
-// a nice hash, that tends to depend on all the bits of the sample, with very
-// little chance of changes in one place impacting changes in another place.
-static uint32_t Crc32(uint32_t sum, HistogramBase::Sample value) {
-  union {
-    HistogramBase::Sample range;
-    unsigned char bytes[sizeof(HistogramBase::Sample)];
-  } converter;
-  converter.range = value;
-  for (size_t i = 0; i < sizeof(converter); ++i) {
-    sum = kCrcTable[(sum & 0xff) ^ converter.bytes[i]] ^ (sum >> 8);
-  }
-  return sum;
-}
-
-BucketRanges::BucketRanges(size_t num_ranges)
-    : ranges_(num_ranges, 0),
-      checksum_(0) {}
-
-BucketRanges::~BucketRanges() = default;
-
-uint32_t BucketRanges::CalculateChecksum() const {
-  // Seed checksum.
-  uint32_t checksum = static_cast<uint32_t>(ranges_.size());
-
-  for (size_t index = 0; index < ranges_.size(); ++index)
-    checksum = Crc32(checksum, ranges_[index]);
-  return checksum;
-}
-
-bool BucketRanges::HasValidChecksum() const {
-  return CalculateChecksum() == checksum_;
-}
-
-void BucketRanges::ResetChecksum() {
-  checksum_ = CalculateChecksum();
-}
-
-bool BucketRanges::Equals(const BucketRanges* other) const {
-  if (checksum_ != other->checksum_)
-    return false;
-  if (ranges_.size() != other->ranges_.size())
-    return false;
-  for (size_t index = 0; index < ranges_.size(); ++index) {
-    if (ranges_[index] != other->ranges_[index])
-      return false;
-  }
-  return true;
-}
-
-}  // namespace base
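
For context, the 256-entry kCrcTable above is the standard lookup table for the reversed polynomial 0xedb88320 mentioned in the comment above; it can be regenerated with a short loop:

#include <cstdint>

// Rebuilds the table above: for each byte value, shift right eight times,
// XORing in the reversed polynomial whenever the low bit is set. Entry 0 is
// 0x0 and entry 1 is 0x77073096, matching kCrcTable.
void BuildCrcTable(uint32_t table[256]) {
  for (uint32_t byte = 0; byte < 256; ++byte) {
    uint32_t crc = byte;
    for (int bit = 0; bit < 8; ++bit)
      crc = (crc & 1) ? (crc >> 1) ^ 0xedb88320u : (crc >> 1);
    table[byte] = crc;
  }
}
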
diff --git a/base/metrics/bucket_ranges.h b/base/metrics/bucket_ranges.h
deleted file mode 100644
index 1b6d069..0000000
--- a/base/metrics/bucket_ranges.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// BucketRanges stores the vector of ranges that delimit what samples are
-// tallied in the corresponding buckets of a histogram. Histograms that have
-// same ranges for all their corresponding buckets should share the same
-// BucketRanges object.
-//
-// E.g. a 5-bucket LinearHistogram with a minimum value of 1 and a maximum
-// value of 4 will need a BucketRanges with 6 ranges:
-// 0, 1, 2, 3, 4, INT_MAX
-//
-// TODO(kaiwang): Currently we keep all negative values in 0~1 bucket. Consider
-// changing 0 to INT_MIN.
-
-#ifndef BASE_METRICS_BUCKET_RANGES_H_
-#define BASE_METRICS_BUCKET_RANGES_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <vector>
-
-#include <limits.h>
-
-#include "base/atomicops.h"
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/metrics/histogram_base.h"
-
-namespace base {
-
-class BASE_EXPORT BucketRanges {
- public:
-  typedef std::vector<HistogramBase::Sample> Ranges;
-
-  explicit BucketRanges(size_t num_ranges);
-  ~BucketRanges();
-
-  size_t size() const { return ranges_.size(); }
-  HistogramBase::Sample range(size_t i) const { return ranges_[i]; }
-  void set_range(size_t i, HistogramBase::Sample value) {
-    DCHECK_LT(i, ranges_.size());
-    DCHECK_GE(value, 0);
-    ranges_[i] = value;
-  }
-  uint32_t checksum() const { return checksum_; }
-  void set_checksum(uint32_t checksum) { checksum_ = checksum; }
-
-  // A bucket is defined by a consecutive pair of entries in |ranges|, so there
-  // is one fewer bucket than there are ranges.  For example, if |ranges| is
-  // [0, 1, 3, 7, INT_MAX], then the buckets in this histogram are
-  // [0, 1), [1, 3), [3, 7), and [7, INT_MAX).
-  size_t bucket_count() const { return ranges_.size() - 1; }
-
-  // Checksum methods to verify whether the ranges are corrupted (e.g. bad
-  // memory access).
-  uint32_t CalculateChecksum() const;
-  bool HasValidChecksum() const;
-  void ResetChecksum();
-
-  // Return true iff |other| object has same ranges_ as |this| object's ranges_.
-  bool Equals(const BucketRanges* other) const;
-
-  // Set and get a reference into persistent memory where this bucket data
-  // can be found (and re-used). These calls are internally atomic but provide
-  // no safety against overwriting an existing value; though it is wasteful to
-  // have multiple identical persistent records, it is still safe.
-  void set_persistent_reference(uint32_t ref) const {
-    subtle::Release_Store(&persistent_reference_, ref);
-  }
-  uint32_t persistent_reference() const {
-    return subtle::Acquire_Load(&persistent_reference_);
-  }
-
- private:
-  // A monotonically increasing list of values which determine which bucket to
-  // put a sample into.  For each index, show the smallest sample that can be
-  // added to the corresponding bucket.
-  Ranges ranges_;
-
-  // Checksum for the contents of ranges_.  Used to detect random overwrites
-  // of our data, and to quickly see if some other BucketRanges instance is
-  // possibly Equals() to this instance.
-  // TODO(kaiwang): Consider changing this to uint64_t, because we see a lot
-  // of noise on the UMA dashboard.
-  uint32_t checksum_;
-
-  // A reference into a global PersistentMemoryAllocator where the ranges
-  // information is stored. This allows for the record to be created once and
-  // re-used simply by having all histograms with the same ranges use the
-  // same reference.
-  mutable subtle::Atomic32 persistent_reference_ = 0;
-
-  DISALLOW_COPY_AND_ASSIGN(BucketRanges);
-};
-
-//////////////////////////////////////////////////////////////////////////////
-// Expose only for test.
-BASE_EXPORT extern const uint32_t kCrcTable[256];
-
-}  // namespace base
-
-#endif  // BASE_METRICS_BUCKET_RANGES_H_
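
Since |ranges_| is monotonically increasing, mapping a sample to a bucket is a binary search over consecutive range pairs. A sketch of that lookup (BucketIndexFor is an invented helper, not part of the deleted interface):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// A sample falls in bucket i when ranges[i] <= sample < ranges[i + 1].
std::size_t BucketIndexFor(const std::vector<int>& ranges, int sample) {
  // upper_bound finds the first range strictly greater than |sample|; the
  // sample's bucket is the one just before it.
  auto it = std::upper_bound(ranges.begin(), ranges.end(), sample);
  assert(it != ranges.begin() && it != ranges.end());
  return static_cast<std::size_t>(it - ranges.begin()) - 1;
}

With ranges {0, 1, 3, 7, INT_MAX}, as in the comment above, sample 0 maps to bucket 0, sample 2 to bucket 1, and sample 7 to bucket 3.
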
diff --git a/base/metrics/dummy_histogram.cc b/base/metrics/dummy_histogram.cc
deleted file mode 100644
index 2707733..0000000
--- a/base/metrics/dummy_histogram.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/dummy_histogram.h"
-
-#include <memory>
-
-#include "base/logging.h"
-#include "base/metrics/histogram_samples.h"
-#include "base/metrics/metrics_hashes.h"
-
-namespace base {
-
-namespace {
-
-// Helper classes for DummyHistogram.
-class DummySampleCountIterator : public SampleCountIterator {
- public:
-  DummySampleCountIterator() {}
-  ~DummySampleCountIterator() override {}
-
-  // SampleCountIterator:
-  bool Done() const override { return true; }
-  void Next() override { NOTREACHED(); }
-  void Get(HistogramBase::Sample* min,
-           int64_t* max,
-           HistogramBase::Count* count) const override {
-    NOTREACHED();
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DummySampleCountIterator);
-};
-
-class DummyHistogramSamples : public HistogramSamples {
- public:
-  explicit DummyHistogramSamples() : HistogramSamples(0, new LocalMetadata()) {}
-  ~DummyHistogramSamples() override {
-    delete static_cast<LocalMetadata*>(meta());
-  }
-
-  // HistogramSamples:
-  void Accumulate(HistogramBase::Sample value,
-                  HistogramBase::Count count) override {}
-  HistogramBase::Count GetCount(HistogramBase::Sample value) const override {
-    return HistogramBase::Count();
-  }
-  HistogramBase::Count TotalCount() const override {
-    return HistogramBase::Count();
-  }
-  std::unique_ptr<SampleCountIterator> Iterator() const override {
-    return std::make_unique<DummySampleCountIterator>();
-  }
-  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override {
-    return true;
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DummyHistogramSamples);
-};
-
-}  // namespace
-
-// static
-DummyHistogram* DummyHistogram::GetInstance() {
-  static base::NoDestructor<DummyHistogram> dummy_histogram;
-  return dummy_histogram.get();
-}
-
-uint64_t DummyHistogram::name_hash() const {
-  return HashMetricName(histogram_name());
-}
-
-HistogramType DummyHistogram::GetHistogramType() const {
-  return DUMMY_HISTOGRAM;
-}
-
-bool DummyHistogram::HasConstructionArguments(
-    Sample expected_minimum,
-    Sample expected_maximum,
-    uint32_t expected_bucket_count) const {
-  return true;
-}
-
-bool DummyHistogram::AddSamplesFromPickle(PickleIterator* iter) {
-  return true;
-}
-
-std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotSamples() const {
-  return std::make_unique<DummyHistogramSamples>();
-}
-
-std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotDelta() {
-  return std::make_unique<DummyHistogramSamples>();
-}
-
-std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotFinalDelta() const {
-  return std::make_unique<DummyHistogramSamples>();
-}
-
-}  // namespace base
diff --git a/base/metrics/dummy_histogram.h b/base/metrics/dummy_histogram.h
deleted file mode 100644
index e2cb64e..0000000
--- a/base/metrics/dummy_histogram.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_DUMMY_HISTOGRAM_H_
-#define BASE_METRICS_DUMMY_HISTOGRAM_H_
-
-#include <stdint.h>
-
-#include <memory>
-#include <string>
-
-#include "base/base_export.h"
-#include "base/metrics/histogram_base.h"
-#include "base/no_destructor.h"
-
-namespace base {
-
-// DummyHistogram is used for mocking histogram objects for histograms that
-// shouldn't be recorded. It doesn't do any actual processing.
-class BASE_EXPORT DummyHistogram : public HistogramBase {
- public:
-  static DummyHistogram* GetInstance();
-
-  // HistogramBase:
-  void CheckName(const StringPiece& name) const override {}
-  uint64_t name_hash() const override;
-  HistogramType GetHistogramType() const override;
-  bool HasConstructionArguments(Sample expected_minimum,
-                                Sample expected_maximum,
-                                uint32_t expected_bucket_count) const override;
-  void Add(Sample value) override {}
-  void AddCount(Sample value, int count) override {}
-  void AddSamples(const HistogramSamples& samples) override {}
-  bool AddSamplesFromPickle(PickleIterator* iter) override;
-  std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
-  std::unique_ptr<HistogramSamples> SnapshotDelta() override;
-  std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
-  void WriteHTMLGraph(std::string* output) const override {}
-  void WriteAscii(std::string* output) const override {}
-
- protected:
-  // HistogramBase:
-  void SerializeInfoImpl(Pickle* pickle) const override {}
-  void GetParameters(DictionaryValue* params) const override {}
-  void GetCountAndBucketData(Count* count,
-                             int64_t* sum,
-                             ListValue* buckets) const override {}
-
- private:
-  friend class NoDestructor<DummyHistogram>;
-
-  DummyHistogram() : HistogramBase("dummy_histogram") {}
-  ~DummyHistogram() override {}
-
-  DISALLOW_COPY_AND_ASSIGN(DummyHistogram);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_DUMMY_HISTOGRAM_H_
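
DummyHistogram is the null-object pattern: callers receive an object that accepts every call and records nothing, so recording sites need no null checks. A hypothetical illustration (GetHistogramOrDummy is invented):

// Invented helper: callers always get a usable HistogramBase*, never null.
base::HistogramBase* GetHistogramOrDummy(bool recording_enabled,
                                         base::HistogramBase* real) {
  return recording_enabled ? real : base::DummyHistogram::GetInstance();
}

// Usage: GetHistogramOrDummy(enabled, h)->Add(sample);  // No null check.
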
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
deleted file mode 100644
index 25f21ca..0000000
--- a/base/metrics/field_trial.cc
+++ /dev/null
@@ -1,1517 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/field_trial.h"
-
-#include <algorithm>
-#include <utility>
-
-#include "base/base_switches.h"
-#include "base/command_line.h"
-#include "base/debug/activity_tracker.h"
-#include "base/logging.h"
-#include "base/metrics/field_trial_param_associator.h"
-#include "base/process/memory.h"
-#include "base/process/process_handle.h"
-#include "base/process/process_info.h"
-#include "base/rand_util.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_split.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/unguessable_token.h"
-
-// On POSIX, the fd is shared using the mapping in GlobalDescriptors.
-#if defined(OS_POSIX) && !defined(OS_NACL)
-#include "base/posix/global_descriptors.h"
-#endif
-
-namespace base {
-
-namespace {
-
-// Define a separator character to use when creating a persistent form of an
-// instance.  This is intended for use as a command line argument, passed to a
-// second process to mimic our state (i.e., provide the same group name).
-const char kPersistentStringSeparator = '/';  // Currently a slash.
-
-// Define a marker character to be used as a prefix to a trial name on the
-// command line which forces its activation.
-const char kActivationMarker = '*';
-
-// Use shared memory to communicate field trial (experiment) state. Set to false
-// for now while the implementation is fleshed out (e.g. data format, single
-// shared memory segment). See https://codereview.chromium.org/2365273004/ and
-// crbug.com/653874
-// The browser is the only process that has write access to the shared memory.
-// This is safe from race conditions because MakeIterable is a release operation
-// and GetNextOfType is an acquire operation, so memory writes before
-// MakeIterable happen before memory reads after GetNextOfType.
-#if defined(OS_FUCHSIA)  // TODO(752368): Not yet supported on Fuchsia.
-const bool kUseSharedMemoryForFieldTrials = false;
-#else
-const bool kUseSharedMemoryForFieldTrials = true;
-#endif
-
-// Constants for the field trial allocator.
-const char kAllocatorName[] = "FieldTrialAllocator";
-
-// We allocate 128 KiB to hold all the field trial data. This should be enough,
-// as most people use 3 - 25 KiB for field trials (as of 11/25/2016).
-// This also doesn't allocate all 128 KiB at once -- the pages only get mapped
-// to physical memory when they are touched. If the size of the allocated field
-// trials does get larger than 128 KiB, then we will drop some field trials in
-// child processes, leading to an inconsistent view between browser and child
-// processes and possibly causing crashes (see crbug.com/661617).
-const size_t kFieldTrialAllocationSize = 128 << 10;  // 128 KiB
-
-// Writes out string1 and then string2 to pickle.
-void WriteStringPair(Pickle* pickle,
-                     const StringPiece& string1,
-                     const StringPiece& string2) {
-  pickle->WriteString(string1);
-  pickle->WriteString(string2);
-}
-
-// Writes out the field trial's contents (via trial_state) to the pickle. The
-// format of the pickle looks like:
-// TrialName, GroupName, ParamKey1, ParamValue1, ParamKey2, ParamValue2, ...
-// If there are no parameters, then it just ends at GroupName.
-void PickleFieldTrial(const FieldTrial::State& trial_state, Pickle* pickle) {
-  WriteStringPair(pickle, *trial_state.trial_name, *trial_state.group_name);
-
-  // Get field trial params.
-  std::map<std::string, std::string> params;
-  FieldTrialParamAssociator::GetInstance()->GetFieldTrialParamsWithoutFallback(
-      *trial_state.trial_name, *trial_state.group_name, &params);
-
-  // Write params to pickle.
-  for (const auto& param : params)
-    WriteStringPair(pickle, param.first, param.second);
-}
-
-// Creates a time value based on the |year|, |month| and |day_of_month|
-// parameters.
-Time CreateTimeFromParams(int year, int month, int day_of_month) {
-  DCHECK_GT(year, 1970);
-  DCHECK_GT(month, 0);
-  DCHECK_LT(month, 13);
-  DCHECK_GT(day_of_month, 0);
-  DCHECK_LT(day_of_month, 32);
-
-  Time::Exploded exploded;
-  exploded.year = year;
-  exploded.month = month;
-  exploded.day_of_week = 0;  // Should be unused.
-  exploded.day_of_month = day_of_month;
-  exploded.hour = 0;
-  exploded.minute = 0;
-  exploded.second = 0;
-  exploded.millisecond = 0;
-  Time out_time;
-  if (!Time::FromLocalExploded(exploded, &out_time)) {
-    // TODO(maksims): implement failure handling.
-    // We might just return |out_time|, which is Time(0).
-    NOTIMPLEMENTED();
-  }
-
-  return out_time;
-}
-
-// Returns the boundary value for comparing against the FieldTrial's added
-// groups for a given |divisor| (total probability) and |entropy_value|.
-FieldTrial::Probability GetGroupBoundaryValue(
-    FieldTrial::Probability divisor,
-    double entropy_value) {
-  // Add a tiny epsilon value to get consistent results when converting
-  // floating-point values to int. Without it, boundary values have
-  // inconsistent results, e.g.:
-  //
-  //   static_cast<FieldTrial::Probability>(100 * 0.56) == 56
-  //   static_cast<FieldTrial::Probability>(100 * 0.57) == 56
-  //   static_cast<FieldTrial::Probability>(100 * 0.58) == 57
-  //   static_cast<FieldTrial::Probability>(100 * 0.59) == 59
-  const double kEpsilon = 1e-8;
-  const FieldTrial::Probability result =
-      static_cast<FieldTrial::Probability>(divisor * entropy_value + kEpsilon);
-  // Ensure that adding the epsilon still results in a value < |divisor|.
-  return std::min(result, divisor - 1);
-}
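// Standalone sketch (not part of this file) demonstrating the truncation
// problem kEpsilon guards against: per the table above, a naive cast maps
// 100 * 0.57 to 56 while the epsilon-adjusted cast yields 57.
#include <cstdio>

int main() {
  const double kEpsilon = 1e-8;
  for (double e : {0.56, 0.57, 0.58, 0.59}) {
    std::printf("%.2f -> naive %d, with epsilon %d\n", e,
                static_cast<int>(100 * e),
                static_cast<int>(100 * e + kEpsilon));
  }
}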
-
-// Separate type from FieldTrial::State so that it can use StringPieces.
-struct FieldTrialStringEntry {
-  StringPiece trial_name;
-  StringPiece group_name;
-  bool activated = false;
-};
-
-// Parses the --force-fieldtrials string |trials_string| into |entries|.
-// Returns true if the string was parsed correctly. On failure, the |entries|
-// array may end up being partially filled.
-bool ParseFieldTrialsString(const std::string& trials_string,
-                            std::vector<FieldTrialStringEntry>* entries) {
-  const StringPiece trials_string_piece(trials_string);
-
-  size_t next_item = 0;
-  while (next_item < trials_string.length()) {
-    size_t name_end = trials_string.find(kPersistentStringSeparator, next_item);
-    if (name_end == trials_string.npos || next_item == name_end)
-      return false;
-    size_t group_name_end =
-        trials_string.find(kPersistentStringSeparator, name_end + 1);
-    if (name_end + 1 == group_name_end)
-      return false;
-    if (group_name_end == trials_string.npos)
-      group_name_end = trials_string.length();
-
-    FieldTrialStringEntry entry;
-    // Check whether the trial should be activated.
-    if (trials_string[next_item] == kActivationMarker) {
-      // Name cannot be only the indicator.
-      if (name_end - next_item == 1)
-        return false;
-      next_item++;
-      entry.activated = true;
-    }
-    entry.trial_name =
-        trials_string_piece.substr(next_item, name_end - next_item);
-    entry.group_name =
-        trials_string_piece.substr(name_end + 1, group_name_end - name_end - 1);
-    next_item = group_name_end + 1;
-
-    entries->push_back(std::move(entry));
-  }
-  return true;
-}
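// Example of the format ParseFieldTrialsString() accepts (values invented):
//
//   --force-fieldtrials="*TrialOne/GroupA/TrialTwo/GroupB/"
//
// parses into two entries: {TrialOne, GroupA, activated} and
// {TrialTwo, GroupB, not activated}. The leading '*' is kActivationMarker
// and the '/' separators are kPersistentStringSeparator.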
-
-void AddFeatureAndFieldTrialFlags(const char* enable_features_switch,
-                                  const char* disable_features_switch,
-                                  CommandLine* cmd_line) {
-  std::string enabled_features;
-  std::string disabled_features;
-  FeatureList::GetInstance()->GetFeatureOverrides(&enabled_features,
-                                                  &disabled_features);
-
-  if (!enabled_features.empty())
-    cmd_line->AppendSwitchASCII(enable_features_switch, enabled_features);
-  if (!disabled_features.empty())
-    cmd_line->AppendSwitchASCII(disable_features_switch, disabled_features);
-
-  std::string field_trial_states;
-  FieldTrialList::AllStatesToString(&field_trial_states, false);
-  if (!field_trial_states.empty()) {
-    cmd_line->AppendSwitchASCII(switches::kForceFieldTrials,
-                                field_trial_states);
-  }
-}
-
-void OnOutOfMemory(size_t size) {
-#if defined(OS_NACL)
-  NOTREACHED();
-#else
-  TerminateBecauseOutOfMemory(size);
-#endif
-}
-
-#if !defined(OS_NACL)
-// Returns whether the operation succeeded.
-bool DeserializeGUIDFromStringPieces(base::StringPiece first,
-                                     base::StringPiece second,
-                                     base::UnguessableToken* guid) {
-  uint64_t high = 0;
-  uint64_t low = 0;
-  if (!base::StringToUint64(first, &high) ||
-      !base::StringToUint64(second, &low)) {
-    return false;
-  }
-
-  *guid = base::UnguessableToken::Deserialize(high, low);
-  return true;
-}
-
-// Extract a read-only SharedMemoryHandle from an existing |shared_memory|
-// handle. Note that on Android, this also makes the whole region read-only.
-SharedMemoryHandle GetSharedMemoryReadOnlyHandle(SharedMemory* shared_memory) {
-  SharedMemoryHandle result = shared_memory->GetReadOnlyHandle();
-#if defined(OS_ANDROID)
-  // On Android, turn the region read-only. This prevents any future
-  // writable mapping attempts, but the original one in |shared_memory|
-  // survives and is still usable in the current process.
-  result.SetRegionReadOnly();
-#endif  // OS_ANDROID
-  return result;
-}
-#endif  // !OS_NACL
-
-}  // namespace
-
-// statics
-const int FieldTrial::kNotFinalized = -1;
-const int FieldTrial::kDefaultGroupNumber = 0;
-bool FieldTrial::enable_benchmarking_ = false;
-
-int FieldTrialList::kNoExpirationYear = 0;
-
-//------------------------------------------------------------------------------
-// FieldTrial methods and members.
-
-FieldTrial::EntropyProvider::~EntropyProvider() = default;
-
-FieldTrial::State::State() = default;
-
-FieldTrial::State::State(const State& other) = default;
-
-FieldTrial::State::~State() = default;
-
-bool FieldTrial::FieldTrialEntry::GetTrialAndGroupName(
-    StringPiece* trial_name,
-    StringPiece* group_name) const {
-  PickleIterator iter = GetPickleIterator();
-  return ReadStringPair(&iter, trial_name, group_name);
-}
-
-bool FieldTrial::FieldTrialEntry::GetParams(
-    std::map<std::string, std::string>* params) const {
-  PickleIterator iter = GetPickleIterator();
-  StringPiece tmp;
-  // Skip reading trial and group name.
-  if (!ReadStringPair(&iter, &tmp, &tmp))
-    return false;
-
-  while (true) {
-    StringPiece key;
-    StringPiece value;
-    if (!ReadStringPair(&iter, &key, &value))
-      return key.empty();  // Non-empty is bad: got one of a pair.
-    (*params)[key.as_string()] = value.as_string();
-  }
-}
-
-PickleIterator FieldTrial::FieldTrialEntry::GetPickleIterator() const {
-  const char* src =
-      reinterpret_cast<const char*>(this) + sizeof(FieldTrialEntry);
-
-  Pickle pickle(src, pickle_size);
-  return PickleIterator(pickle);
-}
-
-bool FieldTrial::FieldTrialEntry::ReadStringPair(
-    PickleIterator* iter,
-    StringPiece* trial_name,
-    StringPiece* group_name) const {
-  if (!iter->ReadStringPiece(trial_name))
-    return false;
-  if (!iter->ReadStringPiece(group_name))
-    return false;
-  return true;
-}
-
-void FieldTrial::Disable() {
-  DCHECK(!group_reported_);
-  enable_field_trial_ = false;
-
-  // In case we are disabled after initialization, we need to switch
-  // the trial to the default group.
-  if (group_ != kNotFinalized) {
-    // Only reset when not already the default group, because in case we were
-    // forced to the default group, the group number may not be
-    // kDefaultGroupNumber, so we should keep it as is.
-    if (group_name_ != default_group_name_)
-      SetGroupChoice(default_group_name_, kDefaultGroupNumber);
-  }
-}
-
-int FieldTrial::AppendGroup(const std::string& name,
-                            Probability group_probability) {
-  // When the group choice was previously forced, we only need to return the
-  // id of the chosen group, and anything can be returned for the others.
-  if (forced_) {
-    DCHECK(!group_name_.empty());
-    if (name == group_name_) {
-      // Note that while |group_| may be equal to |kDefaultGroupNumber| on the
-      // forced trial, it will not have the same value as the default group
-      // number returned from the non-forced |FactoryGetFieldTrial()| call,
-      // which takes care to ensure that this does not happen.
-      return group_;
-    }
-    DCHECK_NE(next_group_number_, group_);
-    // We still return different numbers each time, in case some callers need
-    // them to be different.
-    return next_group_number_++;
-  }
-
-  DCHECK_LE(group_probability, divisor_);
-  DCHECK_GE(group_probability, 0);
-
-  if (enable_benchmarking_ || !enable_field_trial_)
-    group_probability = 0;
-
-  accumulated_group_probability_ += group_probability;
-
-  DCHECK_LE(accumulated_group_probability_, divisor_);
-  if (group_ == kNotFinalized && accumulated_group_probability_ > random_) {
-    // This is the group that crossed the random line, so we do the assignment.
-    SetGroupChoice(name, next_group_number_);
-  }
-  return next_group_number_++;
-}
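// Worked example of the bookkeeping above (numbers invented): with
// divisor_ == 100 and random_ == 55, AppendGroup("A", 30) raises
// accumulated_group_probability_ to 30 (not > 55, so no assignment), and
// AppendGroup("B", 30) raises it to 60 (> 55), so "B" is chosen. Whatever
// probability remains goes to the default group in FinalizeGroupChoice().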
-
-int FieldTrial::group() {
-  FinalizeGroupChoice();
-  if (trial_registered_)
-    FieldTrialList::NotifyFieldTrialGroupSelection(this);
-  return group_;
-}
-
-const std::string& FieldTrial::group_name() {
-  // Call |group()| to ensure group gets assigned and observers are notified.
-  group();
-  DCHECK(!group_name_.empty());
-  return group_name_;
-}
-
-const std::string& FieldTrial::GetGroupNameWithoutActivation() {
-  FinalizeGroupChoice();
-  return group_name_;
-}
-
-void FieldTrial::SetForced() {
-  // We might have been forced before (e.g., by CreateFieldTrial) and it's
-  // first come, first served; e.g., a command-line switch has precedence.
-  if (forced_)
-    return;
-
-  // And we must finalize the group choice before we mark ourselves as forced.
-  FinalizeGroupChoice();
-  forced_ = true;
-}
-
-// static
-void FieldTrial::EnableBenchmarking() {
-  DCHECK_EQ(0u, FieldTrialList::GetFieldTrialCount());
-  enable_benchmarking_ = true;
-}
-
-// static
-FieldTrial* FieldTrial::CreateSimulatedFieldTrial(
-    const std::string& trial_name,
-    Probability total_probability,
-    const std::string& default_group_name,
-    double entropy_value) {
-  return new FieldTrial(trial_name, total_probability, default_group_name,
-                        entropy_value);
-}
-
-FieldTrial::FieldTrial(const std::string& trial_name,
-                       const Probability total_probability,
-                       const std::string& default_group_name,
-                       double entropy_value)
-    : trial_name_(trial_name),
-      divisor_(total_probability),
-      default_group_name_(default_group_name),
-      random_(GetGroupBoundaryValue(total_probability, entropy_value)),
-      accumulated_group_probability_(0),
-      next_group_number_(kDefaultGroupNumber + 1),
-      group_(kNotFinalized),
-      enable_field_trial_(true),
-      forced_(false),
-      group_reported_(false),
-      trial_registered_(false),
-      ref_(FieldTrialList::FieldTrialAllocator::kReferenceNull) {
-  DCHECK_GT(total_probability, 0);
-  DCHECK(!trial_name_.empty());
-  DCHECK(!default_group_name_.empty())
-      << "Trial " << trial_name << " is missing a default group name.";
-}
-
-FieldTrial::~FieldTrial() = default;
-
-void FieldTrial::SetTrialRegistered() {
-  DCHECK_EQ(kNotFinalized, group_);
-  DCHECK(!trial_registered_);
-  trial_registered_ = true;
-}
-
-void FieldTrial::SetGroupChoice(const std::string& group_name, int number) {
-  group_ = number;
-  if (group_name.empty())
-    StringAppendF(&group_name_, "%d", group_);
-  else
-    group_name_ = group_name;
-  DVLOG(1) << "Field trial: " << trial_name_ << " Group choice:" << group_name_;
-}
-
-void FieldTrial::FinalizeGroupChoice() {
-  FinalizeGroupChoiceImpl(false);
-}
-
-void FieldTrial::FinalizeGroupChoiceImpl(bool is_locked) {
-  if (group_ != kNotFinalized)
-    return;
-  accumulated_group_probability_ = divisor_;
-  // Here it's OK to use |kDefaultGroupNumber| since we can't be forced and not
-  // finalized.
-  DCHECK(!forced_);
-  SetGroupChoice(default_group_name_, kDefaultGroupNumber);
-
-  // Add the field trial to shared memory.
-  if (kUseSharedMemoryForFieldTrials && trial_registered_)
-    FieldTrialList::OnGroupFinalized(is_locked, this);
-}
-
-bool FieldTrial::GetActiveGroup(ActiveGroup* active_group) const {
-  if (!group_reported_ || !enable_field_trial_)
-    return false;
-  DCHECK_NE(group_, kNotFinalized);
-  active_group->trial_name = trial_name_;
-  active_group->group_name = group_name_;
-  return true;
-}
-
-bool FieldTrial::GetStateWhileLocked(State* field_trial_state,
-                                     bool include_expired) {
-  if (!include_expired && !enable_field_trial_)
-    return false;
-  FinalizeGroupChoiceImpl(true);
-  field_trial_state->trial_name = &trial_name_;
-  field_trial_state->group_name = &group_name_;
-  field_trial_state->activated = group_reported_;
-  return true;
-}
-
-//------------------------------------------------------------------------------
-// FieldTrialList methods and members.
-
-// static
-FieldTrialList* FieldTrialList::global_ = nullptr;
-
-// static
-bool FieldTrialList::used_without_global_ = false;
-
-FieldTrialList::Observer::~Observer() = default;
-
-FieldTrialList::FieldTrialList(
-    std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider)
-    : entropy_provider_(std::move(entropy_provider)),
-      observer_list_(new ObserverListThreadSafe<FieldTrialList::Observer>(
-          ObserverListPolicy::EXISTING_ONLY)) {
-  DCHECK(!global_);
-  DCHECK(!used_without_global_);
-  global_ = this;
-
-  CHECK(false);  // TODO(scottmg): Remove FieldTrialList.
-}
-
-FieldTrialList::~FieldTrialList() {
-  AutoLock auto_lock(lock_);
-  while (!registered_.empty()) {
-    RegistrationMap::iterator it = registered_.begin();
-    it->second->Release();
-    registered_.erase(it->first);
-  }
-  DCHECK_EQ(this, global_);
-  global_ = nullptr;
-}
-
-// static
-FieldTrial* FieldTrialList::FactoryGetFieldTrial(
-    const std::string& trial_name,
-    FieldTrial::Probability total_probability,
-    const std::string& default_group_name,
-    const int year,
-    const int month,
-    const int day_of_month,
-    FieldTrial::RandomizationType randomization_type,
-    int* default_group_number) {
-  return FactoryGetFieldTrialWithRandomizationSeed(
-      trial_name, total_probability, default_group_name, year, month,
-      day_of_month, randomization_type, 0, default_group_number, nullptr);
-}
-
-// static
-FieldTrial* FieldTrialList::FactoryGetFieldTrialWithRandomizationSeed(
-    const std::string& trial_name,
-    FieldTrial::Probability total_probability,
-    const std::string& default_group_name,
-    const int year,
-    const int month,
-    const int day_of_month,
-    FieldTrial::RandomizationType randomization_type,
-    uint32_t randomization_seed,
-    int* default_group_number,
-    const FieldTrial::EntropyProvider* override_entropy_provider) {
-  if (default_group_number)
-    *default_group_number = FieldTrial::kDefaultGroupNumber;
-  // Check if the field trial has already been created in some other way.
-  FieldTrial* existing_trial = Find(trial_name);
-  if (existing_trial) {
-    CHECK(existing_trial->forced_);
-    // If the default group name differs between the existing forced trial
-    // and this trial, then use a different value for the default group number.
-    if (default_group_number &&
-        default_group_name != existing_trial->default_group_name()) {
-      // If the new default group number corresponds to the group that was
-      // chosen for the forced trial (which has been finalized when it was
-      // forced), then set the default group number to that.
-      if (default_group_name == existing_trial->group_name_internal()) {
-        *default_group_number = existing_trial->group_;
-      } else {
-        // Otherwise, use |kNonConflictingGroupNumber| (-2) for the default
-        // group number, so that it does not conflict with the |AppendGroup()|
-        // result for the chosen group.
-        const int kNonConflictingGroupNumber = -2;
-        static_assert(
-            kNonConflictingGroupNumber != FieldTrial::kDefaultGroupNumber,
-            "The 'non-conflicting' group number conflicts");
-        static_assert(kNonConflictingGroupNumber != FieldTrial::kNotFinalized,
-                      "The 'non-conflicting' group number conflicts");
-        *default_group_number = kNonConflictingGroupNumber;
-      }
-    }
-    return existing_trial;
-  }
-
-  double entropy_value;
-  if (randomization_type == FieldTrial::ONE_TIME_RANDOMIZED) {
-    // If an override entropy provider is given, use it.
-    const FieldTrial::EntropyProvider* entropy_provider =
-        override_entropy_provider ? override_entropy_provider
-                                  : GetEntropyProviderForOneTimeRandomization();
-    CHECK(entropy_provider);
-    entropy_value = entropy_provider->GetEntropyForTrial(trial_name,
-                                                         randomization_seed);
-  } else {
-    DCHECK_EQ(FieldTrial::SESSION_RANDOMIZED, randomization_type);
-    DCHECK_EQ(0U, randomization_seed);
-    entropy_value = RandDouble();
-  }
-
-  FieldTrial* field_trial = new FieldTrial(trial_name, total_probability,
-                                           default_group_name, entropy_value);
-  CHECK(false);  // TODO(scottmg): Remove FieldTrialList.
-  return field_trial;
-}
-
-// static
-FieldTrial* FieldTrialList::Find(const std::string& trial_name) {
-  if (!global_)
-    return nullptr;
-  AutoLock auto_lock(global_->lock_);
-  return global_->PreLockedFind(trial_name);
-}
-
-// static
-int FieldTrialList::FindValue(const std::string& trial_name) {
-  FieldTrial* field_trial = Find(trial_name);
-  if (field_trial)
-    return field_trial->group();
-  return FieldTrial::kNotFinalized;
-}
-
-// static
-std::string FieldTrialList::FindFullName(const std::string& trial_name) {
-  FieldTrial* field_trial = Find(trial_name);
-  if (field_trial)
-    return field_trial->group_name();
-  return std::string();
-}
-
-// static
-bool FieldTrialList::TrialExists(const std::string& trial_name) {
-  return Find(trial_name) != nullptr;
-}
-
-// static
-bool FieldTrialList::IsTrialActive(const std::string& trial_name) {
-  FieldTrial* field_trial = Find(trial_name);
-  FieldTrial::ActiveGroup active_group;
-  return field_trial && field_trial->GetActiveGroup(&active_group);
-}
-
-// static
-void FieldTrialList::StatesToString(std::string* output) {
-  FieldTrial::ActiveGroups active_groups;
-  GetActiveFieldTrialGroups(&active_groups);
-  for (FieldTrial::ActiveGroups::const_iterator it = active_groups.begin();
-       it != active_groups.end(); ++it) {
-    DCHECK_EQ(std::string::npos,
-              it->trial_name.find(kPersistentStringSeparator));
-    DCHECK_EQ(std::string::npos,
-              it->group_name.find(kPersistentStringSeparator));
-    output->append(it->trial_name);
-    output->append(1, kPersistentStringSeparator);
-    output->append(it->group_name);
-    output->append(1, kPersistentStringSeparator);
-  }
-}
-
-// static
-void FieldTrialList::AllStatesToString(std::string* output,
-                                       bool include_expired) {
-  if (!global_)
-    return;
-  AutoLock auto_lock(global_->lock_);
-
-  for (const auto& registered : global_->registered_) {
-    FieldTrial::State trial;
-    if (!registered.second->GetStateWhileLocked(&trial, include_expired))
-      continue;
-    DCHECK_EQ(std::string::npos,
-              trial.trial_name->find(kPersistentStringSeparator));
-    DCHECK_EQ(std::string::npos,
-              trial.group_name->find(kPersistentStringSeparator));
-    if (trial.activated)
-      output->append(1, kActivationMarker);
-    output->append(*trial.trial_name);
-    output->append(1, kPersistentStringSeparator);
-    output->append(*trial.group_name);
-    output->append(1, kPersistentStringSeparator);
-  }
-}
-
-// static
-std::string FieldTrialList::AllParamsToString(bool include_expired,
-                                              EscapeDataFunc encode_data_func) {
-  FieldTrialParamAssociator* params_associator =
-      FieldTrialParamAssociator::GetInstance();
-  std::string output;
-  for (const auto& registered : GetRegisteredTrials()) {
-    FieldTrial::State trial;
-    if (!registered.second->GetStateWhileLocked(&trial, include_expired))
-      continue;
-    DCHECK_EQ(std::string::npos,
-              trial.trial_name->find(kPersistentStringSeparator));
-    DCHECK_EQ(std::string::npos,
-              trial.group_name->find(kPersistentStringSeparator));
-    std::map<std::string, std::string> params;
-    if (params_associator->GetFieldTrialParamsWithoutFallback(
-            *trial.trial_name, *trial.group_name, &params)) {
-      if (params.size() > 0) {
-        // Add a comma to separate from the previous entry if it exists.
-        if (!output.empty())
-          output.append(1, ',');
-
-        output.append(encode_data_func(*trial.trial_name));
-        output.append(1, '.');
-        output.append(encode_data_func(*trial.group_name));
-        output.append(1, ':');
-
-        std::string param_str;
-        for (const auto& param : params) {
-          // Add separator from previous param information if it exists.
-          if (!param_str.empty())
-            param_str.append(1, kPersistentStringSeparator);
-          param_str.append(encode_data_func(param.first));
-          param_str.append(1, kPersistentStringSeparator);
-          param_str.append(encode_data_func(param.second));
-        }
-
-        output.append(param_str);
-      }
-    }
-  }
-  return output;
-}
-
-// static
-void FieldTrialList::GetActiveFieldTrialGroups(
-    FieldTrial::ActiveGroups* active_groups) {
-  DCHECK(active_groups->empty());
-  if (!global_)
-    return;
-  AutoLock auto_lock(global_->lock_);
-
-  for (RegistrationMap::iterator it = global_->registered_.begin();
-       it != global_->registered_.end(); ++it) {
-    FieldTrial::ActiveGroup active_group;
-    if (it->second->GetActiveGroup(&active_group))
-      active_groups->push_back(active_group);
-  }
-}
-
-// static
-void FieldTrialList::GetActiveFieldTrialGroupsFromString(
-    const std::string& trials_string,
-    FieldTrial::ActiveGroups* active_groups) {
-  std::vector<FieldTrialStringEntry> entries;
-  if (!ParseFieldTrialsString(trials_string, &entries))
-    return;
-
-  for (const auto& entry : entries) {
-    if (entry.activated) {
-      FieldTrial::ActiveGroup group;
-      group.trial_name = entry.trial_name.as_string();
-      group.group_name = entry.group_name.as_string();
-      active_groups->push_back(group);
-    }
-  }
-}
-
-// static
-void FieldTrialList::GetInitiallyActiveFieldTrials(
-    const base::CommandLine& command_line,
-    FieldTrial::ActiveGroups* active_groups) {
-  DCHECK(global_);
-  DCHECK(global_->create_trials_from_command_line_called_);
-
-  if (!global_->field_trial_allocator_) {
-    GetActiveFieldTrialGroupsFromString(
-        command_line.GetSwitchValueASCII(switches::kForceFieldTrials),
-        active_groups);
-    return;
-  }
-
-  FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
-  FieldTrialAllocator::Iterator mem_iter(allocator);
-  const FieldTrial::FieldTrialEntry* entry;
-  while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
-         nullptr) {
-    StringPiece trial_name;
-    StringPiece group_name;
-    if (subtle::NoBarrier_Load(&entry->activated) &&
-        entry->GetTrialAndGroupName(&trial_name, &group_name)) {
-      FieldTrial::ActiveGroup group;
-      group.trial_name = trial_name.as_string();
-      group.group_name = group_name.as_string();
-      active_groups->push_back(group);
-    }
-  }
-}
-
-// static
-bool FieldTrialList::CreateTrialsFromString(
-    const std::string& trials_string,
-    const std::set<std::string>& ignored_trial_names) {
-  DCHECK(global_);
-  if (trials_string.empty() || !global_)
-    return true;
-
-  std::vector<FieldTrialStringEntry> entries;
-  if (!ParseFieldTrialsString(trials_string, &entries))
-    return false;
-
-  for (const auto& entry : entries) {
-    const std::string trial_name = entry.trial_name.as_string();
-    const std::string group_name = entry.group_name.as_string();
-
-    if (ContainsKey(ignored_trial_names, trial_name)) {
-      // This is to warn that the field trial forced through command-line
-      // input is unforceable.
-      // Use --enable-logging or --enable-logging=stderr to see this warning.
-      LOG(WARNING) << "Field trial: " << trial_name << " cannot be forced.";
-      continue;
-    }
-
-    FieldTrial* trial = CreateFieldTrial(trial_name, group_name);
-    if (!trial)
-      return false;
-    if (entry.activated) {
-      // Call |group()| to mark the trial as "used" and notify observers, if
-      // any. This is useful to ensure that field trials created in child
-      // processes are properly reported in crash reports.
-      trial->group();
-    }
-  }
-  return true;
-}
-
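-// A minimal sketch of the accepted |trials_string| format (hypothetical
-// names): "Trial1/Group1/*Trial2/Group2/" creates two trials and, because of
-// the '*' activation marker, immediately activates the second one.
-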
-// static
-void FieldTrialList::CreateTrialsFromCommandLine(
-    const CommandLine& cmd_line,
-    const char* field_trial_handle_switch,
-    int fd_key) {
-  global_->create_trials_from_command_line_called_ = true;
-
-#if defined(OS_WIN) || defined(OS_FUCHSIA)
-  if (cmd_line.HasSwitch(field_trial_handle_switch)) {
-    std::string switch_value =
-        cmd_line.GetSwitchValueASCII(field_trial_handle_switch);
-    bool result = CreateTrialsFromSwitchValue(switch_value);
-    DCHECK(result);
-  }
-#elif defined(OS_POSIX) && !defined(OS_NACL)
-  // On POSIX, we check if the handle is valid by seeing if the browser process
-  // sent over the switch (we don't care about the value). Invalid handles
-  // occur in some browser tests which don't initialize the allocator.
-  if (cmd_line.HasSwitch(field_trial_handle_switch)) {
-    std::string switch_value =
-        cmd_line.GetSwitchValueASCII(field_trial_handle_switch);
-    bool result = CreateTrialsFromDescriptor(fd_key, switch_value);
-    DCHECK(result);
-  }
-#endif
-
-  if (cmd_line.HasSwitch(switches::kForceFieldTrials)) {
-    bool result = FieldTrialList::CreateTrialsFromString(
-        cmd_line.GetSwitchValueASCII(switches::kForceFieldTrials),
-        std::set<std::string>());
-    DCHECK(result);
-  }
-}
-
-// static
-void FieldTrialList::CreateFeaturesFromCommandLine(
-    const base::CommandLine& command_line,
-    const char* enable_features_switch,
-    const char* disable_features_switch,
-    FeatureList* feature_list) {
-  // Fallback to command line if not using shared memory.
-  if (!kUseSharedMemoryForFieldTrials ||
-      !global_->field_trial_allocator_.get()) {
-    return feature_list->InitializeFromCommandLine(
-        command_line.GetSwitchValueASCII(enable_features_switch),
-        command_line.GetSwitchValueASCII(disable_features_switch));
-  }
-
-  feature_list->InitializeFromSharedMemory(
-      global_->field_trial_allocator_.get());
-}
-
-#if defined(OS_WIN)
-// static
-void FieldTrialList::AppendFieldTrialHandleIfNeeded(
-    HandlesToInheritVector* handles) {
-  if (!global_)
-    return;
-  if (kUseSharedMemoryForFieldTrials) {
-    InstantiateFieldTrialAllocatorIfNeeded();
-    if (global_->readonly_allocator_handle_.IsValid())
-      handles->push_back(global_->readonly_allocator_handle_.GetHandle());
-  }
-}
-#elif defined(OS_FUCHSIA)
-// TODO(fuchsia): Implement shared-memory configuration (crbug.com/752368).
-#elif defined(OS_POSIX) && !defined(OS_NACL)
-// static
-SharedMemoryHandle FieldTrialList::GetFieldTrialHandle() {
-  if (global_ && kUseSharedMemoryForFieldTrials) {
-    InstantiateFieldTrialAllocatorIfNeeded();
-    // We check for an invalid handle where this gets called.
-    return global_->readonly_allocator_handle_;
-  }
-  return SharedMemoryHandle();
-}
-#endif
-
-// static
-void FieldTrialList::CopyFieldTrialStateToFlags(
-    const char* field_trial_handle_switch,
-    const char* enable_features_switch,
-    const char* disable_features_switch,
-    CommandLine* cmd_line) {
-  // TODO(lawrencewu): Ideally, having the global would be guaranteed. However,
-  // content browser tests currently don't create a FieldTrialList because they
-  // don't run ChromeBrowserMainParts code where it's done for Chrome.
-  // Some tests depend on the enable and disable features flag switch, though,
-  // so we can still add those even though AllStatesToString() will be a no-op.
-  if (!global_) {
-    AddFeatureAndFieldTrialFlags(enable_features_switch,
-                                 disable_features_switch, cmd_line);
-    return;
-  }
-
-  // Use shared memory to pass the state if the feature is enabled, otherwise
-  // fallback to passing it via the command line as a string.
-  if (kUseSharedMemoryForFieldTrials) {
-    InstantiateFieldTrialAllocatorIfNeeded();
-    // If the readonly handle didn't get duplicated properly, then fallback to
-    // original behavior.
-    if (!global_->readonly_allocator_handle_.IsValid()) {
-      AddFeatureAndFieldTrialFlags(enable_features_switch,
-                                   disable_features_switch, cmd_line);
-      return;
-    }
-
-    global_->field_trial_allocator_->UpdateTrackingHistograms();
-    std::string switch_value = SerializeSharedMemoryHandleMetadata(
-        global_->readonly_allocator_handle_);
-    cmd_line->AppendSwitchASCII(field_trial_handle_switch, switch_value);
-
-    // Append --enable-features and --disable-features switches corresponding
-    // to the features enabled on the command-line, so that child and browser
-    // process command lines match and clearly show what has been specified
-    // explicitly by the user.
-    std::string enabled_features;
-    std::string disabled_features;
-    FeatureList::GetInstance()->GetCommandLineFeatureOverrides(
-        &enabled_features, &disabled_features);
-
-    if (!enabled_features.empty())
-      cmd_line->AppendSwitchASCII(enable_features_switch, enabled_features);
-    if (!disabled_features.empty())
-      cmd_line->AppendSwitchASCII(disable_features_switch, disabled_features);
-
-    return;
-  }
-
-  AddFeatureAndFieldTrialFlags(enable_features_switch, disable_features_switch,
-                               cmd_line);
-}
-
-// static
-FieldTrial* FieldTrialList::CreateFieldTrial(
-    const std::string& name,
-    const std::string& group_name) {
-  DCHECK(global_);
-  DCHECK_GE(name.size(), 0u);
-  DCHECK_GE(group_name.size(), 0u);
-  if (name.empty() || group_name.empty() || !global_)
-    return nullptr;
-
-  FieldTrial* field_trial = FieldTrialList::Find(name);
-  if (field_trial) {
-    // In single-process mode, or when we force them from the command line,
-    // we may have already created the field trial.
-    if (field_trial->group_name_internal() != group_name)
-      return nullptr;
-    return field_trial;
-  }
-  const int kTotalProbability = 100;
-  field_trial = new FieldTrial(name, kTotalProbability, group_name, 0);
-  FieldTrialList::Register(field_trial);
-  // Force the trial, which will also finalize the group choice.
-  field_trial->SetForced();
-  return field_trial;
-}
-
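-// Hypothetical usage sketch: a child process mirroring a group chosen in the
-// browser process might call
-//   FieldTrial* trial = FieldTrialList::CreateFieldTrial("MyTrial", "Enabled");
-// which registers (or finds) the trial and forces its group to "Enabled".
-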
-// static
-bool FieldTrialList::AddObserver(Observer* observer) {
-  if (!global_)
-    return false;
-  global_->observer_list_->AddObserver(observer);
-  return true;
-}
-
-// static
-void FieldTrialList::RemoveObserver(Observer* observer) {
-  if (!global_)
-    return;
-  global_->observer_list_->RemoveObserver(observer);
-}
-
-// static
-void FieldTrialList::SetSynchronousObserver(Observer* observer) {
-  DCHECK(!global_->synchronous_observer_);
-  global_->synchronous_observer_ = observer;
-}
-
-// static
-void FieldTrialList::RemoveSynchronousObserver(Observer* observer) {
-  DCHECK_EQ(global_->synchronous_observer_, observer);
-  global_->synchronous_observer_ = nullptr;
-}
-
-// static
-void FieldTrialList::OnGroupFinalized(bool is_locked, FieldTrial* field_trial) {
-  if (!global_)
-    return;
-  if (is_locked) {
-    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
-                              field_trial);
-  } else {
-    AutoLock auto_lock(global_->lock_);
-    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
-                              field_trial);
-  }
-}
-
-// static
-void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
-  if (!global_)
-    return;
-
-  {
-    AutoLock auto_lock(global_->lock_);
-    if (field_trial->group_reported_)
-      return;
-    field_trial->group_reported_ = true;
-
-    if (!field_trial->enable_field_trial_)
-      return;
-
-    if (kUseSharedMemoryForFieldTrials)
-      ActivateFieldTrialEntryWhileLocked(field_trial);
-  }
-
-  // Recording for stability debugging has to be done inline, because a task
-  // posted to an observer may not get executed before a crash.
-  base::debug::GlobalActivityTracker* tracker =
-      base::debug::GlobalActivityTracker::Get();
-  if (tracker) {
-    tracker->RecordFieldTrial(field_trial->trial_name(),
-                              field_trial->group_name_internal());
-  }
-
-  if (global_->synchronous_observer_) {
-    global_->synchronous_observer_->OnFieldTrialGroupFinalized(
-        field_trial->trial_name(), field_trial->group_name_internal());
-  }
-
-  global_->observer_list_->Notify(
-      FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
-      field_trial->trial_name(), field_trial->group_name_internal());
-}
-
-// static
-size_t FieldTrialList::GetFieldTrialCount() {
-  if (!global_)
-    return 0;
-  AutoLock auto_lock(global_->lock_);
-  return global_->registered_.size();
-}
-
-// static
-bool FieldTrialList::GetParamsFromSharedMemory(
-    FieldTrial* field_trial,
-    std::map<std::string, std::string>* params) {
-  DCHECK(global_);
-  // If the field trial allocator is not set up yet, then there are several
-  // cases:
-  //   - We are in the browser process and the allocator has not been set up
-  //   yet. If we got here, then we couldn't find the params in
-  //   FieldTrialParamAssociator, so it's definitely not here. Return false.
-  //   - Using shared memory for field trials is not enabled. If we got here,
-  //   then there's nothing in shared memory. Return false.
-  //   - We are in the child process and the allocator has not been set up yet.
-  //   If this is the case, then you are calling this too early. The field trial
-  //   allocator should get set up very early in the lifecycle. Try to see if
-  //   you can call it after it's been set up.
-  AutoLock auto_lock(global_->lock_);
-  if (!global_->field_trial_allocator_)
-    return false;
-
-  // If ref_ isn't set, then the field trial data can't be in shared memory.
-  if (!field_trial->ref_)
-    return false;
-
-  const FieldTrial::FieldTrialEntry* entry =
-      global_->field_trial_allocator_->GetAsObject<FieldTrial::FieldTrialEntry>(
-          field_trial->ref_);
-
-  size_t allocated_size =
-      global_->field_trial_allocator_->GetAllocSize(field_trial->ref_);
-  size_t actual_size = sizeof(FieldTrial::FieldTrialEntry) + entry->pickle_size;
-  if (allocated_size < actual_size)
-    return false;
-
-  return entry->GetParams(params);
-}
-
-// static
-void FieldTrialList::ClearParamsFromSharedMemoryForTesting() {
-  if (!global_)
-    return;
-
-  AutoLock auto_lock(global_->lock_);
-  if (!global_->field_trial_allocator_)
-    return;
-
-  // To clear the params, we iterate through every item in the allocator, copy
-  // just the trial and group name into a newly-allocated segment and then clear
-  // the existing item.
-  FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
-  FieldTrialAllocator::Iterator mem_iter(allocator);
-
-  // List of refs to eventually be made iterable. We can't make them iterable
-  // inside the loop, since the iterator would then also visit the new entries
-  // and never terminate.
-  std::vector<FieldTrial::FieldTrialRef> new_refs;
-
-  FieldTrial::FieldTrialRef prev_ref;
-  while ((prev_ref = mem_iter.GetNextOfType<FieldTrial::FieldTrialEntry>()) !=
-         FieldTrialAllocator::kReferenceNull) {
-    // Get the existing field trial entry in shared memory.
-    const FieldTrial::FieldTrialEntry* prev_entry =
-        allocator->GetAsObject<FieldTrial::FieldTrialEntry>(prev_ref);
-    StringPiece trial_name;
-    StringPiece group_name;
-    if (!prev_entry->GetTrialAndGroupName(&trial_name, &group_name))
-      continue;
-
-    // Write a new entry, minus the params.
-    Pickle pickle;
-    pickle.WriteString(trial_name);
-    pickle.WriteString(group_name);
-    size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
-    FieldTrial::FieldTrialEntry* new_entry =
-        allocator->New<FieldTrial::FieldTrialEntry>(total_size);
-    subtle::NoBarrier_Store(&new_entry->activated,
-                            subtle::NoBarrier_Load(&prev_entry->activated));
-    new_entry->pickle_size = pickle.size();
-
-    // TODO(lawrencewu): Modify base::Pickle to be able to write over a section
-    // in memory, so we can avoid this memcpy.
-    char* dst = reinterpret_cast<char*>(new_entry) +
-                sizeof(FieldTrial::FieldTrialEntry);
-    memcpy(dst, pickle.data(), pickle.size());
-
-    // Update the ref on the field trial and add it to the list to be made
-    // iterable.
-    FieldTrial::FieldTrialRef new_ref = allocator->GetAsReference(new_entry);
-    FieldTrial* trial = global_->PreLockedFind(trial_name.as_string());
-    trial->ref_ = new_ref;
-    new_refs.push_back(new_ref);
-
-    // Mark the existing entry as unused.
-    allocator->ChangeType(prev_ref, 0,
-                          FieldTrial::FieldTrialEntry::kPersistentTypeId,
-                          /*clear=*/false);
-  }
-
-  for (const auto& ref : new_refs) {
-    allocator->MakeIterable(ref);
-  }
-}
-
-// static
-void FieldTrialList::DumpAllFieldTrialsToPersistentAllocator(
-    PersistentMemoryAllocator* allocator) {
-  if (!global_)
-    return;
-  AutoLock auto_lock(global_->lock_);
-  for (const auto& registered : global_->registered_) {
-    AddToAllocatorWhileLocked(allocator, registered.second);
-  }
-}
-
-// static
-std::vector<const FieldTrial::FieldTrialEntry*>
-FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(
-    PersistentMemoryAllocator const& allocator) {
-  std::vector<const FieldTrial::FieldTrialEntry*> entries;
-  FieldTrialAllocator::Iterator iter(&allocator);
-  const FieldTrial::FieldTrialEntry* entry;
-  while ((entry = iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
-         nullptr) {
-    entries.push_back(entry);
-  }
-  return entries;
-}
-
-// static
-bool FieldTrialList::IsGlobalSetForTesting() {
-  return global_ != nullptr;
-}
-
-// static
-std::string FieldTrialList::SerializeSharedMemoryHandleMetadata(
-    const SharedMemoryHandle& shm) {
-  std::stringstream ss;
-#if defined(OS_WIN)
-  // Tell the child process the name of the inherited HANDLE.
-  uintptr_t uintptr_handle = reinterpret_cast<uintptr_t>(shm.GetHandle());
-  ss << uintptr_handle << ",";
-#elif defined(OS_FUCHSIA)
-  ss << shm.GetHandle() << ",";
-#elif !defined(OS_POSIX)
-#error Unsupported OS
-#endif
-
-  base::UnguessableToken guid = shm.GetGUID();
-  ss << guid.GetHighForSerialization() << "," << guid.GetLowForSerialization();
-  ss << "," << shm.GetSize();
-  return ss.str();
-}
-
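-// From the code above, the serialized metadata is a comma-separated list:
-// "<handle>,<guid-high>,<guid-low>,<size>" on Windows and Fuchsia, and
-// "<guid-high>,<guid-low>,<size>" on POSIX, where the descriptor itself is
-// passed down separately via GlobalDescriptors.
-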
-#if defined(OS_WIN) || defined(OS_FUCHSIA)
-
-// static
-SharedMemoryHandle FieldTrialList::DeserializeSharedMemoryHandleMetadata(
-    const std::string& switch_value) {
-  std::vector<base::StringPiece> tokens = base::SplitStringPiece(
-      switch_value, ",", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
-
-  if (tokens.size() != 4)
-    return SharedMemoryHandle();
-
-  int field_trial_handle = 0;
-  if (!base::StringToInt(tokens[0], &field_trial_handle))
-    return SharedMemoryHandle();
-#if defined(OS_FUCHSIA)
-  zx_handle_t handle = static_cast<zx_handle_t>(field_trial_handle);
-#elif defined(OS_WIN)
-  HANDLE handle =
-      reinterpret_cast<HANDLE>(static_cast<uintptr_t>(field_trial_handle));
-  if (base::IsCurrentProcessElevated()) {
-    // base::LaunchElevatedProcess doesn't have a way to duplicate the handle,
-    // but this process can since by definition it's not sandboxed.
-    base::ProcessId parent_pid = base::GetParentProcessId(GetCurrentProcess());
-    HANDLE parent_handle = OpenProcess(PROCESS_ALL_ACCESS, FALSE, parent_pid);
-    DuplicateHandle(parent_handle, handle, GetCurrentProcess(), &handle, 0,
-                    FALSE, DUPLICATE_SAME_ACCESS);
-    CloseHandle(parent_handle);
-  }
-#endif  // defined(OS_WIN)
-
-  base::UnguessableToken guid;
-  if (!DeserializeGUIDFromStringPieces(tokens[1], tokens[2], &guid))
-    return SharedMemoryHandle();
-
-  int size;
-  if (!base::StringToInt(tokens[3], &size))
-    return SharedMemoryHandle();
-
-  return SharedMemoryHandle(handle, static_cast<size_t>(size), guid);
-}
-
-#elif defined(OS_POSIX) && !defined(OS_NACL)
-
-// static
-SharedMemoryHandle FieldTrialList::DeserializeSharedMemoryHandleMetadata(
-    int fd,
-    const std::string& switch_value) {
-  std::vector<base::StringPiece> tokens = base::SplitStringPiece(
-      switch_value, ",", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
-
-  if (tokens.size() != 3)
-    return SharedMemoryHandle();
-
-  base::UnguessableToken guid;
-  if (!DeserializeGUIDFromStringPieces(tokens[0], tokens[1], &guid))
-    return SharedMemoryHandle();
-
-  int size;
-  if (!base::StringToInt(tokens[2], &size))
-    return SharedMemoryHandle();
-
-  return SharedMemoryHandle(FileDescriptor(fd, true), static_cast<size_t>(size),
-                            guid);
-}
-
-#endif
-
-#if defined(OS_WIN) || defined(OS_FUCHSIA)
-// static
-bool FieldTrialList::CreateTrialsFromSwitchValue(
-    const std::string& switch_value) {
-  SharedMemoryHandle shm = DeserializeSharedMemoryHandleMetadata(switch_value);
-  if (!shm.IsValid())
-    return false;
-  return FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm);
-}
-#elif defined(OS_POSIX) && !defined(OS_NACL)
-// static
-bool FieldTrialList::CreateTrialsFromDescriptor(
-    int fd_key,
-    const std::string& switch_value) {
-  if (!kUseSharedMemoryForFieldTrials)
-    return false;
-
-  if (fd_key == -1)
-    return false;
-
-  int fd = GlobalDescriptors::GetInstance()->MaybeGet(fd_key);
-  if (fd == -1)
-    return false;
-
-  SharedMemoryHandle shm =
-      DeserializeSharedMemoryHandleMetadata(fd, switch_value);
-  if (!shm.IsValid())
-    return false;
-
-  bool result = FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm);
-  DCHECK(result);
-  return true;
-}
-#endif  // defined(OS_POSIX) && !defined(OS_NACL)
-
-// static
-bool FieldTrialList::CreateTrialsFromSharedMemoryHandle(
-    SharedMemoryHandle shm_handle) {
-  // shm gets deleted when it goes out of scope, but that's OK because we need
-  // it only for the duration of this method.
-  std::unique_ptr<SharedMemory> shm(new SharedMemory(shm_handle, true));
-  if (!shm->Map(kFieldTrialAllocationSize))
-    OnOutOfMemory(kFieldTrialAllocationSize);
-
-  return FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
-}
-
-// static
-bool FieldTrialList::CreateTrialsFromSharedMemory(
-    std::unique_ptr<SharedMemory> shm) {
-  global_->field_trial_allocator_.reset(
-      new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, true));
-  FieldTrialAllocator* shalloc = global_->field_trial_allocator_.get();
-  FieldTrialAllocator::Iterator mem_iter(shalloc);
-
-  const FieldTrial::FieldTrialEntry* entry;
-  while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
-         nullptr) {
-    StringPiece trial_name;
-    StringPiece group_name;
-    if (!entry->GetTrialAndGroupName(&trial_name, &group_name))
-      return false;
-
-    // TODO(lawrencewu): Convert the API for CreateFieldTrial to take
-    // StringPieces.
-    FieldTrial* trial =
-        CreateFieldTrial(trial_name.as_string(), group_name.as_string());
-
-    trial->ref_ = mem_iter.GetAsReference(entry);
-    if (subtle::NoBarrier_Load(&entry->activated)) {
-      // Call |group()| to mark the trial as "used" and notify observers, if
-      // any. This is useful to ensure that field trials created in child
-      // processes are properly reported in crash reports.
-      trial->group();
-    }
-  }
-  return true;
-}
-
-// static
-void FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded() {
-  if (!global_)
-    return;
-  AutoLock auto_lock(global_->lock_);
-  // Create the allocator if not already created and add all existing trials.
-  if (global_->field_trial_allocator_ != nullptr)
-    return;
-
-  SharedMemoryCreateOptions options;
-  options.size = kFieldTrialAllocationSize;
-  options.share_read_only = true;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  options.type = SharedMemoryHandle::POSIX;
-#endif
-
-  std::unique_ptr<SharedMemory> shm(new SharedMemory());
-  if (!shm->Create(options))
-    OnOutOfMemory(kFieldTrialAllocationSize);
-
-  if (!shm->Map(kFieldTrialAllocationSize))
-    OnOutOfMemory(kFieldTrialAllocationSize);
-
-  global_->field_trial_allocator_.reset(
-      new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, false));
-  global_->field_trial_allocator_->CreateTrackingHistograms(kAllocatorName);
-
-  // Add all existing field trials.
-  for (const auto& registered : global_->registered_) {
-    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
-                              registered.second);
-  }
-
-  // Add all existing features.
-  FeatureList::GetInstance()->AddFeaturesToAllocator(
-      global_->field_trial_allocator_.get());
-
-#if !defined(OS_NACL)
-  global_->readonly_allocator_handle_ = GetSharedMemoryReadOnlyHandle(
-      global_->field_trial_allocator_->shared_memory());
-#endif
-}
-
-// static
-void FieldTrialList::AddToAllocatorWhileLocked(
-    PersistentMemoryAllocator* allocator,
-    FieldTrial* field_trial) {
-  // Don't do anything if the allocator hasn't been instantiated yet.
-  if (allocator == nullptr)
-    return;
-
-  // Or if the allocator is read-only, which means we are in a child process
-  // and shouldn't be writing to it.
-  if (allocator->IsReadonly())
-    return;
-
-  FieldTrial::State trial_state;
-  if (!field_trial->GetStateWhileLocked(&trial_state, false))
-    return;
-
-  // Or if we've already added it. We must check after GetStateWhileLocked()
-  // since it can also add to the allocator.
-  if (field_trial->ref_)
-    return;
-
-  Pickle pickle;
-  PickleFieldTrial(trial_state, &pickle);
-
-  size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
-  FieldTrial::FieldTrialRef ref = allocator->Allocate(
-      total_size, FieldTrial::FieldTrialEntry::kPersistentTypeId);
-  if (ref == FieldTrialAllocator::kReferenceNull) {
-    NOTREACHED();
-    return;
-  }
-
-  FieldTrial::FieldTrialEntry* entry =
-      allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
-  subtle::NoBarrier_Store(&entry->activated, trial_state.activated);
-  entry->pickle_size = pickle.size();
-
-  // TODO(lawrencewu): Modify base::Pickle to be able to write over a section in
-  // memory, so we can avoid this memcpy.
-  char* dst =
-      reinterpret_cast<char*>(entry) + sizeof(FieldTrial::FieldTrialEntry);
-  memcpy(dst, pickle.data(), pickle.size());
-
-  allocator->MakeIterable(ref);
-  field_trial->ref_ = ref;
-}
-
-// static
-void FieldTrialList::ActivateFieldTrialEntryWhileLocked(
-    FieldTrial* field_trial) {
-  FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
-
-  // Check if we're in the child process and return early if so.
-  if (!allocator || allocator->IsReadonly())
-    return;
-
-  FieldTrial::FieldTrialRef ref = field_trial->ref_;
-  if (ref == FieldTrialAllocator::kReferenceNull) {
-    // It's fine to do this even if the allocator hasn't been instantiated
-    // yet -- it'll just return early.
-    AddToAllocatorWhileLocked(allocator, field_trial);
-  } else {
-    // It's also okay to do this even though the callee doesn't have a lock --
-    // the only thing that happens on a stale read here is a slight performance
-    // hit from the child re-synchronizing activation state.
-    FieldTrial::FieldTrialEntry* entry =
-        allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
-    subtle::NoBarrier_Store(&entry->activated, 1);
-  }
-}
-
-// static
-const FieldTrial::EntropyProvider*
-    FieldTrialList::GetEntropyProviderForOneTimeRandomization() {
-  if (!global_) {
-    used_without_global_ = true;
-    return nullptr;
-  }
-
-  return global_->entropy_provider_.get();
-}
-
-FieldTrial* FieldTrialList::PreLockedFind(const std::string& name) {
-  RegistrationMap::iterator it = registered_.find(name);
-  if (registered_.end() == it)
-    return nullptr;
-  return it->second;
-}
-
-// static
-void FieldTrialList::Register(FieldTrial* trial) {
-  if (!global_) {
-    used_without_global_ = true;
-    return;
-  }
-  AutoLock auto_lock(global_->lock_);
-  CHECK(!global_->PreLockedFind(trial->trial_name())) << trial->trial_name();
-  trial->AddRef();
-  trial->SetTrialRegistered();
-  global_->registered_[trial->trial_name()] = trial;
-}
-
-// static
-FieldTrialList::RegistrationMap FieldTrialList::GetRegisteredTrials() {
-  RegistrationMap output;
-  if (global_) {
-    AutoLock auto_lock(global_->lock_);
-    output = global_->registered_;
-  }
-  return output;
-}
-
-}  // namespace base
diff --git a/base/metrics/field_trial.h b/base/metrics/field_trial.h
deleted file mode 100644
index d5d6cb8..0000000
--- a/base/metrics/field_trial.h
+++ /dev/null
@@ -1,802 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// FieldTrial is a class for handling details of statistical experiments
-// performed by actual users in the field (i.e., in a shipped or beta product).
-// All code is called exclusively on the UI thread currently.
-//
-// The simplest example is an experiment to see whether one of two options
-// produces "better" results across our user population.  In that scenario, UMA
-// data is uploaded to aggregate the test results, and this FieldTrial class
-// manages the state of each such experiment (state == which option was
-// pseudo-randomly selected).
-//
-// States are typically generated randomly, either based on a one time
-// randomization (which will yield the same results, in terms of selecting
-// the client for a field trial or not, for every run of the program on a
-// given machine), or by a session randomization (generated each time the
-// application starts up, but held constant during the duration of the
-// process).
-
-//------------------------------------------------------------------------------
-// Example:  Suppose we have an experiment involving memory, such as determining
-// the impact of some pruning algorithm.
-// We assume that we already have a histogram of memory usage, such as:
-
-//   UMA_HISTOGRAM_COUNTS("Memory.RendererTotal", count);
-
-// Somewhere in main thread initialization code, we'd probably define an
-// instance of a FieldTrial, with code such as:
-
-// // FieldTrials are reference counted, and persist automagically until
-// // process teardown, courtesy of their automatic registration in
-// // FieldTrialList.
-// // Note: This field trial will run in Chrome instances compiled through
-// //       8 July, 2015, and after that all instances will be in "StandardMem".
-// scoped_refptr<base::FieldTrial> trial(
-//     base::FieldTrialList::FactoryGetFieldTrial(
-//         "MemoryExperiment", 1000, "StandardMem", 2015, 7, 8,
-//         base::FieldTrial::ONE_TIME_RANDOMIZED, NULL));
-//
-// const int high_mem_group =
-//     trial->AppendGroup("HighMem", 20);  // 2% in HighMem group.
-// const int low_mem_group =
-//     trial->AppendGroup("LowMem", 20);   // 2% in LowMem group.
-// // Take action depending on which group we randomly land in.
-// if (trial->group() == high_mem_group)
-//   SetPruningAlgorithm(kType1);  // Sample setting of browser state.
-// else if (trial->group() == low_mem_group)
-//   SetPruningAlgorithm(kType2);  // Sample alternate setting.
-
-//------------------------------------------------------------------------------
-
-#ifndef BASE_METRICS_FIELD_TRIAL_H_
-#define BASE_METRICS_FIELD_TRIAL_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <map>
-#include <memory>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "base/atomicops.h"
-#include "base/base_export.h"
-#include "base/command_line.h"
-#include "base/feature_list.h"
-#include "base/files/file.h"
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/shared_memory.h"
-#include "base/memory/shared_memory_handle.h"
-#include "base/metrics/persistent_memory_allocator.h"
-#include "base/observer_list_threadsafe.h"
-#include "base/pickle.h"
-#include "base/process/launch.h"
-#include "base/strings/string_piece.h"
-#include "base/synchronization/lock.h"
-#include "base/time/time.h"
-#include "build_config.h"
-
-namespace base {
-
-class FieldTrialList;
-
-class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
- public:
-  typedef int Probability;  // Probability type for being selected in a trial.
-
-  // TODO(665129): Make private again after crash has been resolved.
-  typedef SharedPersistentMemoryAllocator::Reference FieldTrialRef;
-
-  // Specifies the persistence of the field trial group choice.
-  enum RandomizationType {
-    // One time randomized trials will persist the group choice between
-    // restarts, which is recommended for most trials, especially those that
-    // change user visible behavior.
-    ONE_TIME_RANDOMIZED,
-    // Session randomized trials will roll the dice to select a group on every
-    // process restart.
-    SESSION_RANDOMIZED,
-  };
-
-  // EntropyProvider is an interface for providing entropy for one-time
-  // randomized (persistent) field trials.
-  class BASE_EXPORT EntropyProvider {
-   public:
-    virtual ~EntropyProvider();
-
-    // Returns a double in the range of [0, 1) to be used for the dice roll for
-    // the specified field trial. If |randomization_seed| is not 0, it will be
-    // used in preference to |trial_name| for generating the entropy by entropy
-    // providers that support it. A given instance should always return the same
-    // value given the same input |trial_name| and |randomization_seed| values.
-    virtual double GetEntropyForTrial(const std::string& trial_name,
-                                      uint32_t randomization_seed) const = 0;
-  };
-
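-  // A minimal sketch of a conforming provider (illustrative only; the names
-  // below are hypothetical, not part of this file):
-  //
-  //   class FixedEntropyProvider : public FieldTrial::EntropyProvider {
-  //    public:
-  //     double GetEntropyForTrial(const std::string& /* trial_name */,
-  //                               uint32_t /* randomization_seed */)
-  //         const override {
-  //       return 0.25;  // Always land a quarter of the way into [0, 1).
-  //     }
-  //   };
-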
-  // A pair representing a Field Trial and its selected group.
-  struct ActiveGroup {
-    std::string trial_name;
-    std::string group_name;
-  };
-
-  // A triplet representing a FieldTrial, its selected group and whether it's
-  // active. String members are pointers to the underlying strings owned by the
-  // FieldTrial object. Does not use StringPiece to avoid conversions back to
-  // std::string.
-  struct BASE_EXPORT State {
-    const std::string* trial_name = nullptr;
-    const std::string* group_name = nullptr;
-    bool activated = false;
-
-    State();
-    State(const State& other);
-    ~State();
-  };
-
-  // We create one FieldTrialEntry per field trial in shared memory, via
-  // AddToAllocatorWhileLocked. The FieldTrialEntry is followed by a
-  // base::Pickle object that we unpickle and read from.
-  struct BASE_EXPORT FieldTrialEntry {
-    // SHA1(FieldTrialEntry): Increment this if structure changes!
-    static constexpr uint32_t kPersistentTypeId = 0xABA17E13 + 2;
-
-    // Expected size for 32/64-bit check.
-    static constexpr size_t kExpectedInstanceSize = 8;
-
-    // Whether or not this field trial is activated. This is really just a
-    // boolean but using a 32 bit value for portability reasons. It should be
-    // accessed via NoBarrier_Load()/NoBarrier_Store() to prevent the compiler
-    // from doing unexpected optimizations because it thinks that only one
-    // thread is accessing the memory location.
-    subtle::Atomic32 activated;
-
-    // Size of the pickled structure, NOT the total size of this entry.
-    uint32_t pickle_size;
-
-    // Calling this is only valid when the entry is initialized. That is, it
-    // resides in shared memory and has a pickle containing the trial name and
-    // group name following it.
-    bool GetTrialAndGroupName(StringPiece* trial_name,
-                              StringPiece* group_name) const;
-
-    // Calling this is only valid when the entry is initialized as well. Reads
-    // the parameters following the trial and group name and stores them as
-    // key-value mappings in |params|.
-    bool GetParams(std::map<std::string, std::string>* params) const;
-
-   private:
-    // Returns an iterator over the data containing names and params.
-    PickleIterator GetPickleIterator() const;
-
-    // Takes the iterator and writes out the first two items into |trial_name|
-    // and |group_name|.
-    bool ReadStringPair(PickleIterator* iter,
-                        StringPiece* trial_name,
-                        StringPiece* group_name) const;
-  };
-
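-  // Per the fields above, the shared-memory layout of one entry is, roughly:
-  //
-  //   [ activated : 4 bytes ][ pickle_size : 4 bytes ][ pickled data ... ]
-  //
-  // where the pickled data holds the trial name, the group name and any
-  // param key/value pairs, in that order.
-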
-  typedef std::vector<ActiveGroup> ActiveGroups;
-
-  // A return value to indicate that a given instance has not yet had a group
-  // assignment (and hence is not yet participating in the trial).
-  static const int kNotFinalized;
-
-  // Disables this trial, meaning it always determines the default group
-  // has been selected. May be called immediately after construction, or
-  // at any time after initialization (should not be interleaved with
-  // AppendGroup calls). Once disabled, there is no way to re-enable a
-  // trial.
-  // TODO(mad): http://code.google.com/p/chromium/issues/detail?id=121446
-  // This doesn't properly reset to Default when a group was forced.
-  void Disable();
-
-  // Establish the name and probability of the next group in this trial.
-  // Sometimes, based on construction randomization, this call may cause the
-  // provided group to be *THE* group selected for use in this instance.
-  // The return value is the group number of the new group.
-  int AppendGroup(const std::string& name, Probability group_probability);
-
-  // Return the name of the FieldTrial (excluding the group name).
-  const std::string& trial_name() const { return trial_name_; }
-
-  // Return the randomly selected group number that was assigned, and notify
-  // any/all observers that this finalized group number has presumably been used
-  // (queried), and will never change. Note that this will force an instance to
-  // participate, and make it illegal to attempt to probabilistically add any
-  // other groups to the trial.
-  int group();
-
-  // If the group's name is empty, a string version containing the group number
-  // is used as the group name. This causes a winner to be chosen if one has
-  // not been chosen already.
-  const std::string& group_name();
-
-  // Finalizes the group choice and returns the chosen group, but does not mark
-  // the trial as active - so its state will not be reported until group_name()
-  // or similar is called.
-  const std::string& GetGroupNameWithoutActivation();
-
-  // Sets the field trial as forced, meaning that it was set up earlier than
-  // the hard-coded registration of the field trial, in order to override it.
-  // This allows the code that was hard-coded to register the field trial to
-  // still succeed even though the field trial has already been registered.
-  // This must be called after appending all the groups, since we will make
-  // the group choice here. Note that this is a NOOP for already-forced trials.
-  // And, like the rest of the FieldTrial code, this is not thread-safe and
-  // must be done from the UI thread.
-  void SetForced();
-
-  // Enabling benchmarking sets field trials to a common setting.
-  static void EnableBenchmarking();
-
-  // Creates a FieldTrial object with the specified parameters, to be used for
-  // simulation of group assignment without actually affecting global field
-  // trial state in the running process. Group assignment will be done based on
-  // |entropy_value|, which must have a range of [0, 1).
-  //
-  // Note: Using this function will not register the field trial globally in the
-  // running process - for that, use FieldTrialList::FactoryGetFieldTrial().
-  //
-  // The ownership of the returned FieldTrial is transferred to the caller,
-  // which is responsible for deref'ing it (e.g. by using
-  // scoped_refptr<FieldTrial>).
-  static FieldTrial* CreateSimulatedFieldTrial(
-      const std::string& trial_name,
-      Probability total_probability,
-      const std::string& default_group_name,
-      double entropy_value);
-
- private:
-  // Allow tests to access our innards for testing purposes.
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, Registration);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, AbsoluteProbabilities);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, RemainingProbability);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FiftyFiftyProbability);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, MiddleProbabilities);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, OneWinner);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DisableProbability);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, ActiveGroups);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, AllGroups);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, ActiveGroupsNotFinalized);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, Save);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SaveAll);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DuplicateRestore);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedTurnFeatureOff);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedTurnFeatureOn);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_Default);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_NonDefault);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FloatBoundariesGiveEqualGroupSizes);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DoesNotSurpassTotalProbability);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
-                           DoNotAddSimulatedFieldTrialsToAllocator);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
-
-  friend class base::FieldTrialList;
-
-  friend class RefCounted<FieldTrial>;
-
-  // This is the group number of the 'default' group when a choice wasn't forced
-  // by a call to FieldTrialList::CreateFieldTrial. It is kept private so that
-  // consumers don't use it by mistake in cases where the group was forced.
-  static const int kDefaultGroupNumber;
-
-  // Creates a field trial with the specified parameters. Group assignment will
-  // be done based on |entropy_value|, which must have a range of [0, 1).
-  FieldTrial(const std::string& trial_name,
-             Probability total_probability,
-             const std::string& default_group_name,
-             double entropy_value);
-  virtual ~FieldTrial();
-
-  // Return the default group name of the FieldTrial.
-  std::string default_group_name() const { return default_group_name_; }
-
-  // Marks this trial as having been registered with the FieldTrialList. Must be
-  // called no more than once and before any |group()| calls have occurred.
-  void SetTrialRegistered();
-
-  // Sets the chosen group name and number.
-  void SetGroupChoice(const std::string& group_name, int number);
-
-  // Ensures that a group is chosen, if it hasn't yet been. The field trial
-  // might yet be disabled, so this call will *not* notify observers of the
-  // status.
-  void FinalizeGroupChoice();
-
-  // Implements FinalizeGroupChoice() with the added flexibility of being
-  // deadlock-free if |is_locked| is true and the caller is holding a lock.
-  void FinalizeGroupChoiceImpl(bool is_locked);
-
-  // Returns the trial name and selected group name for this field trial via
-  // the output parameter |active_group|, but only if the group has already
-  // been chosen and has been externally observed via |group()| and the trial
-  // has not been disabled. In that case, true is returned and |active_group|
-  // is filled in; otherwise, the result is false and |active_group| is left
-  // untouched.
-  bool GetActiveGroup(ActiveGroup* active_group) const;
-
-  // Returns the trial name and selected group name for this field trial via
-  // the output parameter |field_trial_state| for all studies when
-  // |include_expired| is true. When |include_expired| is false, true is
-  // returned and |field_trial_state| is filled in only if the trial has not
-  // been disabled; otherwise, the result is false and |field_trial_state| is
-  // left untouched.
-  // This function is deadlock-free if the caller is holding a lock.
-  bool GetStateWhileLocked(State* field_trial_state, bool include_expired);
-
-  // Returns the group_name. A winner need not have been chosen.
-  std::string group_name_internal() const { return group_name_; }
-
-  // The name of the field trial, as can be found via the FieldTrialList.
-  const std::string trial_name_;
-
-  // The maximum sum of all probabilities supplied, which corresponds to 100%.
-  // This is the scaling factor used to adjust supplied probabilities.
-  const Probability divisor_;
-
-  // The name of the default group.
-  const std::string default_group_name_;
-
-  // The randomly selected probability that is used to select a group (or have
-  // the instance not participate).  It is the product of divisor_ and a random
-  // number in [0, 1).
-  Probability random_;
-
-  // Sum of the probabilities of all appended groups.
-  Probability accumulated_group_probability_;
-
-  // The number that will be returned by the next AppendGroup() call.
-  int next_group_number_;
-
-  // The pseudo-randomly assigned group number.
-  // This is kNotFinalized if no group has been assigned.
-  int group_;
-
-  // A textual name for the randomly selected group. Valid after |group()|
-  // has been called.
-  std::string group_name_;
-
-  // When enable_field_trial_ is false, field trial reverts to the 'default'
-  // group.
-  bool enable_field_trial_;
-
-  // When forced_ is true, we return the chosen group from AppendGroup when
-  // appropriate.
-  bool forced_;
-
-  // Specifies whether the group choice has been reported to observers.
-  bool group_reported_;
-
-  // Whether this trial is registered with the global FieldTrialList and thus
-  // should notify it when its group is queried.
-  bool trial_registered_;
-
-  // Reference to related field trial struct and data in shared memory.
-  FieldTrialRef ref_;
-
-  // When benchmarking is enabled, field trials all revert to the 'default'
-  // group.
-  static bool enable_benchmarking_;
-
-  DISALLOW_COPY_AND_ASSIGN(FieldTrial);
-};
-
-//------------------------------------------------------------------------------
-// Class with a list of all active field trials.  A trial is active if it has
-// been registered, which includes evaluating its state based on its
-// probability. Only one instance of this class exists and, outside of testing,
-// it will live for the entire lifetime of the process.
-class BASE_EXPORT FieldTrialList {
- public:
-  typedef SharedPersistentMemoryAllocator FieldTrialAllocator;
-
-  // Type for function pointer passed to |AllParamsToString| used to escape
-  // special characters from |input|.
-  typedef std::string (*EscapeDataFunc)(const std::string& input);
-
-  // Year that is guaranteed to not be expired when instantiating a field trial
-  // via |FactoryGetFieldTrial()|.  Set to two years from the build date.
-  static int kNoExpirationYear;
-
-  // Observer is notified when a FieldTrial's group is selected.
-  class BASE_EXPORT Observer {
-   public:
-    // Notifies observers when a FieldTrial's group is selected.
-    virtual void OnFieldTrialGroupFinalized(const std::string& trial_name,
-                                            const std::string& group_name) = 0;
-
-   protected:
-    virtual ~Observer();
-  };
-
-  // This singleton holds the global list of registered FieldTrials.
-  //
-  // To support one-time randomized field trials, specify a non-null
-  // |entropy_provider| which should be a source of uniformly distributed
-  // entropy values. If one time randomization is not desired, pass in null for
-  // |entropy_provider|.
-  explicit FieldTrialList(
-      std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider);
-
-  // Destructor Release()'s references to all registered FieldTrial instances.
-  ~FieldTrialList();
-
-  // Get a FieldTrial instance from the factory.
-  //
-  // |name| is used to register the instance with the FieldTrialList class,
-  // and can be used to find the trial (only one trial can be present for each
-  // name). |default_group_name| is the name of the default group which will
-  // be chosen if none of the subsequent appended groups get to be chosen.
-  // |default_group_number| can receive the group number of the default group as
-  // AppendGroup returns the numbers of the subsequent groups. |trial_name| and
-  // |default_group_name| may not be empty, but |default_group_number| can be
-  // NULL if the value is not needed.
-  //
-  // Group probabilities that are later supplied must sum to less than or equal
-  // to the |total_probability|. Arguments |year|, |month| and |day_of_month|
-  // specify the expiration time. If the build time is after the expiration time
-  // then the field trial reverts to the 'default' group.
-  //
-  // Use this static method to get a startup-randomized FieldTrial or a
-  // previously created forced FieldTrial.
-  static FieldTrial* FactoryGetFieldTrial(
-      const std::string& trial_name,
-      FieldTrial::Probability total_probability,
-      const std::string& default_group_name,
-      const int year,
-      const int month,
-      const int day_of_month,
-      FieldTrial::RandomizationType randomization_type,
-      int* default_group_number);
-
-  // Same as FactoryGetFieldTrial(), but allows specifying a custom seed to be
-  // used on one-time randomized field trials (instead of a hash of the trial
-  // name, which is used otherwise or if |randomization_seed| has value 0). The
-  // |randomization_seed| value (other than 0) should never be the same for two
-  // trials, else this would result in correlated group assignments.  Note:
-  // Using a custom randomization seed is only supported by the
-  // PermutedEntropyProvider (which is used when UMA is not enabled). If
-  // |override_entropy_provider| is not null, then it will be used for
-  // randomization instead of the provider given when the FieldTrialList was
-  // instantiated.
-  static FieldTrial* FactoryGetFieldTrialWithRandomizationSeed(
-      const std::string& trial_name,
-      FieldTrial::Probability total_probability,
-      const std::string& default_group_name,
-      const int year,
-      const int month,
-      const int day_of_month,
-      FieldTrial::RandomizationType randomization_type,
-      uint32_t randomization_seed,
-      int* default_group_number,
-      const FieldTrial::EntropyProvider* override_entropy_provider);
-
-  // The Find() method can be used to test to see if a named trial was already
-  // registered, or to retrieve a pointer to it from the global map.
-  static FieldTrial* Find(const std::string& trial_name);
-
-  // Returns the group number chosen for the named trial, or
-  // FieldTrial::kNotFinalized if the trial does not exist.
-  static int FindValue(const std::string& trial_name);
-
-  // Returns the group name chosen for the named trial, or the empty string if
-  // the trial does not exist. The first call of this function on a given field
-  // trial will mark it as active, so that its state will be reported with usage
-  // metrics, crashes, etc.
-  static std::string FindFullName(const std::string& trial_name);
-
-  // Returns true if the named trial has been registered.
-  static bool TrialExists(const std::string& trial_name);
-
-  // Returns true if the named trial exists and has been activated.
-  static bool IsTrialActive(const std::string& trial_name);
-
-  // Creates a persistent representation of active FieldTrial instances for
-  // resurrection in another process. This allows randomization to be done in
-  // one process, and secondary processes can be synchronized on the result.
-  // The resulting string contains the name and group name pairs of all
-  // registered FieldTrials for which the group has been chosen and externally
-  // observed (via |group()|) and which have not been disabled, with "/" used
-  // to separate all names and to terminate the string. This string is parsed
-  // by |CreateTrialsFromString()|.
-  static void StatesToString(std::string* output);
-
-  // Creates a persistent representation of all FieldTrial instances for
-  // resurrection in another process. This allows randomization to be done in
-  // one process, and secondary processes can be synchronized on the result.
-  // The resulting string contains the name and group name pairs of all
-  // registered FieldTrials including disabled based on |include_expired|,
-  // with "/" used to separate all names and to terminate the string. All
-  // activated trials have their name prefixed with "*". This string is parsed
-  // by |CreateTrialsFromString()|.
-  static void AllStatesToString(std::string* output, bool include_expired);
-
-  // Creates a persistent representation of all FieldTrial params for
-  // resurrection in another process. The returned string contains the trial
-  // name and group name pairs of all registered FieldTrials (including
-  // disabled ones, depending on |include_expired|) separated by '.'. Each pair
-  // is followed by a ':' separator and a list of param names and values
-  // separated by '/'. It also takes an |encode_data_func| function pointer for
-  // encoding special characters.
-  // This string is parsed by |AssociateParamsFromString()|.
-  static std::string AllParamsToString(bool include_expired,
-                                       EscapeDataFunc encode_data_func);
-
-  // Fills in the supplied vector |active_groups| (which must be empty when
-  // called) with a snapshot of all registered FieldTrials for which the group
-  // has been chosen and externally observed (via |group()|) and which have
-  // not been disabled.
-  static void GetActiveFieldTrialGroups(
-      FieldTrial::ActiveGroups* active_groups);
-
-  // Returns the field trials that are marked active in |trials_string|.
-  static void GetActiveFieldTrialGroupsFromString(
-      const std::string& trials_string,
-      FieldTrial::ActiveGroups* active_groups);
-
-  // Returns the field trials that were active when the process was
-  // created. Either parses the field trial string or the shared memory
-  // holding field trial information.
-  // Must be called only after a call to CreateTrialsFromCommandLine().
-  static void GetInitiallyActiveFieldTrials(
-      const base::CommandLine& command_line,
-      FieldTrial::ActiveGroups* active_groups);
-
-  // Use a state string (re: StatesToString()) to augment the current list of
-  // field trials to include the supplied trials, and using a 100% probability
-  // for each trial, force them to have the same group string. This is commonly
-  // used in a non-browser process, to carry randomly selected state in a
-  // browser process into this non-browser process, but could also be invoked
-  // through a command line argument to the browser process. Created field
-  // trials will be marked "used" for the purposes of active trial reporting
-  // if they are prefixed with |kActivationMarker|. Trial names in
-  // |ignored_trial_names| are ignored when parsing |trials_string|.
-  static bool CreateTrialsFromString(
-      const std::string& trials_string,
-      const std::set<std::string>& ignored_trial_names);
-
-  // Achieves the same thing as CreateTrialsFromString, except wraps the logic
-  // by taking in the trials from the command line, either via shared memory
-  // handle or command line argument. A bit of a misnomer since on POSIX we
-  // simply get the trials from opening |fd_key| if using shared memory. On
-  // Windows, we expect the |cmd_line| switch for |field_trial_handle_switch| to
-  // contain the shared memory handle that contains the field trial allocator.
-  // We need the |field_trial_handle_switch| and |fd_key| arguments to be passed
-  // in since base/ can't depend on content/.
-  static void CreateTrialsFromCommandLine(const base::CommandLine& cmd_line,
-                                          const char* field_trial_handle_switch,
-                                          int fd_key);
-
-  // Creates base::Feature overrides from the command line by first trying to
-  // use shared memory and then falling back to the command line if it fails.
-  static void CreateFeaturesFromCommandLine(
-      const base::CommandLine& command_line,
-      const char* enable_features_switch,
-      const char* disable_features_switch,
-      FeatureList* feature_list);
-
-#if defined(OS_WIN)
-  // On Windows, we need to explicitly pass down any handles to be inherited.
-  // This function adds the shared memory handle to field trial state to the
-  // list of handles to be inherited.
-  static void AppendFieldTrialHandleIfNeeded(
-      base::HandlesToInheritVector* handles);
-#elif defined(OS_FUCHSIA)
-  // TODO(fuchsia): Implement shared-memory configuration (crbug.com/752368).
-#elif defined(OS_POSIX) && !defined(OS_NACL)
-  // On POSIX, we also need to explicitly pass down this file descriptor that
-  // should be shared with the child process. Returns an invalid handle if it
-  // was not initialized properly.
-  static base::SharedMemoryHandle GetFieldTrialHandle();
-#endif
-
-  // Adds a switch to the command line containing the field trial state as a
-  // string (if not using shared memory to share field trial state), or the
-  // shared memory handle + length.
-  // Needs the |field_trial_handle_switch| argument to be passed in since base/
-  // can't depend on content/.
-  static void CopyFieldTrialStateToFlags(const char* field_trial_handle_switch,
-                                         const char* enable_features_switch,
-                                         const char* disable_features_switch,
-                                         base::CommandLine* cmd_line);
-
-  // Creates a FieldTrial with the given |name| and, using 100% probability for
-  // the FieldTrial, forces it to have the same group string as |group_name|.
-  // This is commonly used in a non-browser process, to carry randomly selected
-  // state in a browser process into this non-browser process.
-  // It returns NULL if there is a FieldTrial that is already registered with
-  // the same |name| but has a different finalized group string (|group_name|).
-  static FieldTrial* CreateFieldTrial(const std::string& name,
-                                      const std::string& group_name);
-
-  // Add an observer to be notified when a field trial is irrevocably committed
-  // to being part of some specific field_group (and hence the group_name is
-  // also finalized for that field_trial). Returns false and does nothing if
-  // there is no FieldTrialList singleton.
-  static bool AddObserver(Observer* observer);
-
-  // Remove an observer.
-  static void RemoveObserver(Observer* observer);
-
-  // Similar to AddObserver(), but the passed observer will be notified
-  // synchronously when a field trial is activated and its group selected. It
-  // will be notified synchronously on the same thread where the activation and
-  // group selection happened. It is the responsibility of the observer to make
-  // sure that this is a safe operation and the operation must be fast, as this
-  // work is done synchronously as part of group() or related APIs. Only a
-  // single such observer is supported, exposed specifically for crash
-  // reporting. Must be called on the main thread before any other threads
-  // have been started.
-  static void SetSynchronousObserver(Observer* observer);
-
-  // Removes the single synchronous observer.
-  static void RemoveSynchronousObserver(Observer* observer);
-
-  // Grabs the lock if necessary and adds the field trial to the allocator. This
-  // should only be called from FinalizeGroupChoice().
-  static void OnGroupFinalized(bool is_locked, FieldTrial* field_trial);
-
-  // Notify all observers that a group has been finalized for |field_trial|.
-  static void NotifyFieldTrialGroupSelection(FieldTrial* field_trial);
-
-  // Return the number of active field trials.
-  static size_t GetFieldTrialCount();
-
-  // Gets the parameters for |field_trial| from shared memory and stores them in
-  // |params|. This is only exposed for use by FieldTrialParamAssociator and
-  // shouldn't be used by anything else.
-  static bool GetParamsFromSharedMemory(
-      FieldTrial* field_trial,
-      std::map<std::string, std::string>* params);
-
-  // Clears all the params in the allocator.
-  static void ClearParamsFromSharedMemoryForTesting();
-
-  // Dumps field trial state to an allocator so that it can be analyzed after a
-  // crash.
-  static void DumpAllFieldTrialsToPersistentAllocator(
-      PersistentMemoryAllocator* allocator);
-
-  // Retrieves field trial state from an allocator so that it can be analyzed
-  // after a crash. The pointers in the returned vector are into the persistent
-  // memory segment and so are only valid as long as the allocator is valid.
-  static std::vector<const FieldTrial::FieldTrialEntry*>
-  GetAllFieldTrialsFromPersistentAllocator(
-      PersistentMemoryAllocator const& allocator);
-
-  // Returns true if a global field trial list is set. Only used for testing.
-  static bool IsGlobalSetForTesting();
-
- private:
-  // Allow tests to access our innards for testing purposes.
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, InstantiateAllocator);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AddTrialsToAllocator);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
-                           DoNotAddSimulatedFieldTrialsToAllocator);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AssociateFieldTrialParams);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
-                           SerializeSharedMemoryHandleMetadata);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, CheckReadOnlySharedMemoryHandle);
-
-  // Serialization is used to pass information about the handle to child
-  // processes. It passes a reference to the relevant OS resource, and it passes
-  // a GUID. Serialization and deserialization don't actually transport the
-  // underlying OS resource; that must be done by the process launcher.
-  static std::string SerializeSharedMemoryHandleMetadata(
-      const SharedMemoryHandle& shm);
-#if defined(OS_WIN) || defined(OS_FUCHSIA)
-  static SharedMemoryHandle DeserializeSharedMemoryHandleMetadata(
-      const std::string& switch_value);
-#elif defined(OS_POSIX) && !defined(OS_NACL)
-  static SharedMemoryHandle DeserializeSharedMemoryHandleMetadata(
-      int fd,
-      const std::string& switch_value);
-#endif
-
-#if defined(OS_WIN) || defined(OS_FUCHSIA)
-  // Takes in |switch_value| from the command line, which represents the shared
-  // memory handle for field trials, parses it, and creates the field trials.
-  // Returns true on success, false on failure.
-  // |switch_value| also contains the serialized GUID.
-  static bool CreateTrialsFromSwitchValue(const std::string& switch_value);
-#elif defined(OS_POSIX) && !defined(OS_NACL)
-  // On POSIX systems that use the zygote, we look up, via an fd key in
-  // GlobalDescriptors, the fd that backs the shared memory segment containing
-  // the field trials. Returns true on success, false on failure.
-  // |switch_value| also contains the serialized GUID.
-  static bool CreateTrialsFromDescriptor(int fd_key,
-                                         const std::string& switch_value);
-#endif
-
-  // Takes an unmapped SharedMemoryHandle, creates a SharedMemory object from it
-  // and maps it with the correct size.
-  static bool CreateTrialsFromSharedMemoryHandle(SharedMemoryHandle shm_handle);
-
-  // Expects a mapped piece of shared memory |shm| that was created from the
-  // browser process's field_trial_allocator and shared via the command line.
-  // This function recreates the allocator, iterates through all the field
-  // trials in it, and creates them via CreateFieldTrial(). Returns true if
-  // successful and false otherwise.
-  static bool CreateTrialsFromSharedMemory(
-      std::unique_ptr<base::SharedMemory> shm);
-
-  // Instantiates the field trial allocator, adds all existing field trials to
-  // it, and duplicates its handle to a read-only handle, which gets stored in
-  // |readonly_allocator_handle_|.
-  static void InstantiateFieldTrialAllocatorIfNeeded();
-
-  // Adds the field trial to the allocator. Caller must hold a lock before
-  // calling this.
-  static void AddToAllocatorWhileLocked(PersistentMemoryAllocator* allocator,
-                                        FieldTrial* field_trial);
-
-  // Activate the corresponding field trial entry struct in shared memory.
-  static void ActivateFieldTrialEntryWhileLocked(FieldTrial* field_trial);
-
-  // A map from FieldTrial names to the actual instances.
-  typedef std::map<std::string, FieldTrial*> RegistrationMap;
-
-  // If one-time randomization is enabled, returns a weak pointer to the
-  // corresponding EntropyProvider. Otherwise, returns NULL.
-  static const FieldTrial::EntropyProvider*
-      GetEntropyProviderForOneTimeRandomization();
-
-  // Helper function that should be called only while holding lock_.
-  FieldTrial* PreLockedFind(const std::string& name);
-
-  // Register() stores a pointer to the given trial in a global map.
-  // This method also AddRef's the indicated trial.
-  // This should always be called after creating a new FieldTrial instance.
-  static void Register(FieldTrial* trial);
-
-  // Returns all the registered trials.
-  static RegistrationMap GetRegisteredTrials();
-
-  static FieldTrialList* global_;  // The singleton of this class.
-
-  // Tracks whether there was an attempt to register a field trial, or to
-  // check whether one-time randomization is enabled, without creating the
-  // FieldTrialList. This is not an error unless a FieldTrialList is created
-  // afterwards.
-  static bool used_without_global_;
-
-  // Lock for access to registered_ and field_trial_allocator_.
-  Lock lock_;
-  RegistrationMap registered_;
-
-  std::map<std::string, std::string> seen_states_;
-
-  // Entropy provider to be used for one-time randomized field trials. If NULL,
-  // one-time randomization is not supported.
-  std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider_;
-
-  // List of observers to be notified when a group is selected for a FieldTrial.
-  scoped_refptr<ObserverListThreadSafe<Observer>> observer_list_;
-
-  // Single synchronous observer to be notified when a trial group is chosen.
-  Observer* synchronous_observer_ = nullptr;
-
-  // Allocator in shared memory containing field trial data. Used in both
-  // browser and child processes, but read-only in the child.
-  // In the future, we may want to move this to a more generic place if we want
-  // to start passing more data other than field trials.
-  std::unique_ptr<FieldTrialAllocator> field_trial_allocator_ = nullptr;
-
-  // Read-only copy of the handle to the allocator. Needs to be a member
-  // variable
-  // because it's needed from both CopyFieldTrialStateToFlags() and
-  // AppendFieldTrialHandleIfNeeded().
-  base::SharedMemoryHandle readonly_allocator_handle_;
-
-  // Tracks whether CreateTrialsFromCommandLine() has been called.
-  bool create_trials_from_command_line_called_ = false;
-
-  DISALLOW_COPY_AND_ASSIGN(FieldTrialList);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_FIELD_TRIAL_H_
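A minimal sketch of how the FieldTrialList API deleted above was typically
used from a non-browser process, assuming a FieldTrialList singleton already
exists; the trial and group names here are hypothetical, not from this CL.

#include "base/metrics/field_trial.h"

void MirrorBrowserTrial() {
  // CreateFieldTrial() returns NULL if a trial named "MyStudy" is already
  // finalized to a group other than "Enabled".
  base::FieldTrial* trial =
      base::FieldTrialList::CreateFieldTrial("MyStudy", "Enabled");
  if (!trial)
    return;  // Conflicting registration; nothing more to do.
  // The group is now fixed: trial->group_name() reports "Enabled".
}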
diff --git a/base/metrics/field_trial_param_associator.cc b/base/metrics/field_trial_param_associator.cc
deleted file mode 100644
index af76eaf..0000000
--- a/base/metrics/field_trial_param_associator.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/field_trial_param_associator.h"
-
-#include "base/metrics/field_trial.h"
-
-namespace base {
-
-FieldTrialParamAssociator::FieldTrialParamAssociator() = default;
-FieldTrialParamAssociator::~FieldTrialParamAssociator() = default;
-
-// static
-FieldTrialParamAssociator* FieldTrialParamAssociator::GetInstance() {
-  return Singleton<FieldTrialParamAssociator,
-                   LeakySingletonTraits<FieldTrialParamAssociator>>::get();
-}
-
-bool FieldTrialParamAssociator::AssociateFieldTrialParams(
-    const std::string& trial_name,
-    const std::string& group_name,
-    const FieldTrialParams& params) {
-  if (FieldTrialList::IsTrialActive(trial_name))
-    return false;
-
-  AutoLock scoped_lock(lock_);
-  const FieldTrialKey key(trial_name, group_name);
-  if (ContainsKey(field_trial_params_, key))
-    return false;
-
-  field_trial_params_[key] = params;
-  return true;
-}
-
-bool FieldTrialParamAssociator::GetFieldTrialParams(
-    const std::string& trial_name,
-    FieldTrialParams* params) {
-  FieldTrial* field_trial = FieldTrialList::Find(trial_name);
-  if (!field_trial)
-    return false;
-
-  // First try the local map, falling back to getting it from shared memory.
-  if (GetFieldTrialParamsWithoutFallback(trial_name, field_trial->group_name(),
-                                         params)) {
-    return true;
-  }
-
-  // TODO(lawrencewu): add the params to field_trial_params_ for next time.
-  return FieldTrialList::GetParamsFromSharedMemory(field_trial, params);
-}
-
-bool FieldTrialParamAssociator::GetFieldTrialParamsWithoutFallback(
-    const std::string& trial_name,
-    const std::string& group_name,
-    FieldTrialParams* params) {
-  AutoLock scoped_lock(lock_);
-
-  const FieldTrialKey key(trial_name, group_name);
-  if (!ContainsKey(field_trial_params_, key))
-    return false;
-
-  *params = field_trial_params_[key];
-  return true;
-}
-
-void FieldTrialParamAssociator::ClearAllParamsForTesting() {
-  {
-    AutoLock scoped_lock(lock_);
-    field_trial_params_.clear();
-  }
-  FieldTrialList::ClearParamsFromSharedMemoryForTesting();
-}
-
-void FieldTrialParamAssociator::ClearParamsForTesting(
-    const std::string& trial_name,
-    const std::string& group_name) {
-  AutoLock scoped_lock(lock_);
-  const FieldTrialKey key(trial_name, group_name);
-  field_trial_params_.erase(key);
-}
-
-void FieldTrialParamAssociator::ClearAllCachedParamsForTesting() {
-  AutoLock scoped_lock(lock_);
-  field_trial_params_.clear();
-}
-
-}  // namespace base
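A hedged usage sketch of the associator deleted above: associate params with a
not-yet-active trial, then read them back without the shared-memory fallback.
The trial, group, and param names are hypothetical.

#include "base/metrics/field_trial_param_associator.h"

void AssociateAndReadBack() {
  base::FieldTrialParamAssociator* associator =
      base::FieldTrialParamAssociator::GetInstance();

  base::FieldTrialParamAssociator::FieldTrialParams params;
  params["mode"] = "fast";  // Hypothetical key/value.
  // Fails (returns false) if "MyStudy" is already active or already has
  // params associated with it.
  if (!associator->AssociateFieldTrialParams("MyStudy", "Enabled", params))
    return;

  base::FieldTrialParamAssociator::FieldTrialParams read_back;
  // Local-map lookup only; no fallback to shared memory.
  if (associator->GetFieldTrialParamsWithoutFallback("MyStudy", "Enabled",
                                                     &read_back)) {
    // read_back["mode"] is "fast".
  }
}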
diff --git a/base/metrics/field_trial_param_associator.h b/base/metrics/field_trial_param_associator.h
deleted file mode 100644
index b35e2cc..0000000
--- a/base/metrics/field_trial_param_associator.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
-#define BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
-
-#include <map>
-#include <string>
-#include <utility>
-
-#include "base/base_export.h"
-#include "base/memory/singleton.h"
-#include "base/metrics/field_trial.h"
-#include "base/synchronization/lock.h"
-
-namespace base {
-
-// Keeps track of the parameters of all field trials and ensures access to them
-// is thread-safe.
-class BASE_EXPORT FieldTrialParamAssociator {
- public:
-  FieldTrialParamAssociator();
-  ~FieldTrialParamAssociator();
-
-  // Key-value mapping type for field trial parameters.
-  typedef std::map<std::string, std::string> FieldTrialParams;
-
-  // Retrieve the singleton.
-  static FieldTrialParamAssociator* GetInstance();
-
-  // Sets parameters for the given field trial name and group.
-  bool AssociateFieldTrialParams(const std::string& trial_name,
-                                 const std::string& group_name,
-                                 const FieldTrialParams& params);
-
-  // Gets the parameters for a field trial and its chosen group. If they are
-  // not found in field_trial_params_, tries to look them up in shared memory.
-  bool GetFieldTrialParams(const std::string& trial_name,
-                           FieldTrialParams* params);
-
-  // Gets the parameters for a field trial and its chosen group. Does not fall
-  // back to looking them up in shared memory. This should only be used if you
-  // know for sure the params are in the mapping, e.g. in the browser process,
-  // and even then you should probably just use GetFieldTrialParams().
-  bool GetFieldTrialParamsWithoutFallback(const std::string& trial_name,
-                                          const std::string& group_name,
-                                          FieldTrialParams* params);
-
-  // Clears the internal field_trial_params_ mapping and removes all params
-  // from shared memory.
-  void ClearAllParamsForTesting();
-
-  // Clears a single field trial param.
-  // Note: this does NOT remove the param in shared memory.
-  void ClearParamsForTesting(const std::string& trial_name,
-                             const std::string& group_name);
-
-  // Clears the internal field_trial_params_ mapping.
-  void ClearAllCachedParamsForTesting();
-
- private:
-  friend struct DefaultSingletonTraits<FieldTrialParamAssociator>;
-
-  // (field_trial_name, field_trial_group)
-  typedef std::pair<std::string, std::string> FieldTrialKey;
-
-  Lock lock_;
-  std::map<FieldTrialKey, FieldTrialParams> field_trial_params_;
-
-  DISALLOW_COPY_AND_ASSIGN(FieldTrialParamAssociator);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
diff --git a/base/metrics/field_trial_params.cc b/base/metrics/field_trial_params.cc
deleted file mode 100644
index 7195f4a..0000000
--- a/base/metrics/field_trial_params.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/field_trial_params.h"
-
-#include "base/feature_list.h"
-#include "base/metrics/field_trial.h"
-#include "base/metrics/field_trial_param_associator.h"
-#include "base/strings/string_number_conversions.h"
-
-namespace base {
-
-bool AssociateFieldTrialParams(
-    const std::string& trial_name,
-    const std::string& group_name,
-    const std::map<std::string, std::string>& params) {
-  return base::FieldTrialParamAssociator::GetInstance()
-      ->AssociateFieldTrialParams(trial_name, group_name, params);
-}
-
-bool GetFieldTrialParams(const std::string& trial_name,
-                         std::map<std::string, std::string>* params) {
-  return base::FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(
-      trial_name, params);
-}
-
-bool GetFieldTrialParamsByFeature(const base::Feature& feature,
-                                  std::map<std::string, std::string>* params) {
-  if (!base::FeatureList::IsEnabled(feature))
-    return false;
-
-  base::FieldTrial* trial = base::FeatureList::GetFieldTrial(feature);
-  if (!trial)
-    return false;
-
-  return GetFieldTrialParams(trial->trial_name(), params);
-}
-
-std::string GetFieldTrialParamValue(const std::string& trial_name,
-                                    const std::string& param_name) {
-  std::map<std::string, std::string> params;
-  if (GetFieldTrialParams(trial_name, &params)) {
-    std::map<std::string, std::string>::iterator it = params.find(param_name);
-    if (it != params.end())
-      return it->second;
-  }
-  return std::string();
-}
-
-std::string GetFieldTrialParamValueByFeature(const base::Feature& feature,
-                                             const std::string& param_name) {
-  if (!base::FeatureList::IsEnabled(feature))
-    return std::string();
-
-  base::FieldTrial* trial = base::FeatureList::GetFieldTrial(feature);
-  if (!trial)
-    return std::string();
-
-  return GetFieldTrialParamValue(trial->trial_name(), param_name);
-}
-
-int GetFieldTrialParamByFeatureAsInt(const base::Feature& feature,
-                                     const std::string& param_name,
-                                     int default_value) {
-  std::string value_as_string =
-      GetFieldTrialParamValueByFeature(feature, param_name);
-  int value_as_int = 0;
-  if (!base::StringToInt(value_as_string, &value_as_int)) {
-    if (!value_as_string.empty()) {
-      DLOG(WARNING) << "Failed to parse field trial param " << param_name
-                    << " with string value " << value_as_string
-                    << " under feature " << feature.name
-                    << " into an int. Falling back to default value of "
-                    << default_value;
-    }
-    value_as_int = default_value;
-  }
-  return value_as_int;
-}
-
-double GetFieldTrialParamByFeatureAsDouble(const base::Feature& feature,
-                                           const std::string& param_name,
-                                           double default_value) {
-  std::string value_as_string =
-      GetFieldTrialParamValueByFeature(feature, param_name);
-  double value_as_double = 0;
-  if (!base::StringToDouble(value_as_string, &value_as_double)) {
-    if (!value_as_string.empty()) {
-      DLOG(WARNING) << "Failed to parse field trial param " << param_name
-                    << " with string value " << value_as_string
-                    << " under feature " << feature.name
-                    << " into a double. Falling back to default value of "
-                    << default_value;
-    }
-    value_as_double = default_value;
-  }
-  return value_as_double;
-}
-
-bool GetFieldTrialParamByFeatureAsBool(const base::Feature& feature,
-                                       const std::string& param_name,
-                                       bool default_value) {
-  std::string value_as_string =
-      GetFieldTrialParamValueByFeature(feature, param_name);
-  if (value_as_string == "true")
-    return true;
-  if (value_as_string == "false")
-    return false;
-
-  if (!value_as_string.empty()) {
-    DLOG(WARNING) << "Failed to parse field trial param " << param_name
-                  << " with string value " << value_as_string
-                  << " under feature " << feature.name
-                  << " into a bool. Falling back to default value of "
-                  << default_value;
-  }
-  return default_value;
-}
-
-std::string FeatureParam<std::string>::Get() const {
-  const std::string value = GetFieldTrialParamValueByFeature(*feature, name);
-  return value.empty() ? default_value : value;
-}
-
-double FeatureParam<double>::Get() const {
-  return GetFieldTrialParamByFeatureAsDouble(*feature, name, default_value);
-}
-
-int FeatureParam<int>::Get() const {
-  return GetFieldTrialParamByFeatureAsInt(*feature, name, default_value);
-}
-
-bool FeatureParam<bool>::Get() const {
-  return GetFieldTrialParamByFeatureAsBool(*feature, name, default_value);
-}
-
-void LogInvalidEnumValue(const base::Feature& feature,
-                         const std::string& param_name,
-                         const std::string& value_as_string,
-                         int default_value_as_int) {
-  DLOG(WARNING) << "Failed to parse field trial param " << param_name
-                << " with string value " << value_as_string << " under feature "
-                << feature.name
-                << " into an enum. Falling back to default value of "
-                << default_value_as_int;
-}
-
-}  // namespace base
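A small sketch of the typed getters defined above, reading an int and a bool
parameter off a feature; |kMyFeature| and the param names are hypothetical.

#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"

// Hypothetical feature; real declarations live near the code they control.
const base::Feature kMyFeature{"MyFeature", base::FEATURE_DISABLED_BY_DEFAULT};

void ReadParams() {
  // Falls back to 4 if the feature is disabled, the param is missing, or the
  // value does not parse as an int (a DLOG warning fires in the last case).
  int parallelism =
      base::GetFieldTrialParamByFeatureAsInt(kMyFeature, "parallelism", 4);
  // Only "true" and "false" are accepted; anything else yields the default.
  bool is_helpful =
      base::GetFieldTrialParamByFeatureAsBool(kMyFeature, "is_helpful", true);
  (void)parallelism;
  (void)is_helpful;
}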
diff --git a/base/metrics/field_trial_params.h b/base/metrics/field_trial_params.h
deleted file mode 100644
index 8682226..0000000
--- a/base/metrics/field_trial_params.h
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_FIELD_TRIAL_PARAMS_H_
-#define BASE_METRICS_FIELD_TRIAL_PARAMS_H_
-
-#include <map>
-#include <string>
-
-#include "base/base_export.h"
-
-namespace base {
-
-struct Feature;
-
-// Associates the specified set of key-value |params| with the field trial
-// specified by |trial_name| and |group_name|. Fails and returns false if the
-// specified field trial already has params associated with it or the trial
-// is already active (group() has been called on it). Thread safe.
-BASE_EXPORT bool AssociateFieldTrialParams(
-    const std::string& trial_name,
-    const std::string& group_name,
-    const std::map<std::string, std::string>& params);
-
-// Retrieves the set of key-value |params| for the specified field trial, based
-// on its selected group. If the field trial does not exist or its selected
-// group does not have any parameters associated with it, returns false and
-// does not modify |params|. Calling this function will result in the field
-// trial being marked as active if found (i.e. group() will be called on it),
-// if it wasn't already. Thread safe.
-BASE_EXPORT bool GetFieldTrialParams(
-    const std::string& trial_name,
-    std::map<std::string, std::string>* params);
-
-// Retrieves the set of key-value |params| for the field trial associated with
-// the specified |feature|. A feature is associated with at most one field
-// trial and selected group. See base/feature_list.h for more information on
-// features. If the feature is not enabled, or if there's no associated params,
-// returns false and does not modify |params|. Calling this function will
-// result in the associated field trial being marked as active if found (i.e.
-// group() will be called on it), if it wasn't already. Thread safe.
-BASE_EXPORT bool GetFieldTrialParamsByFeature(
-    const base::Feature& feature,
-    std::map<std::string, std::string>* params);
-
-// Retrieves a specific parameter value corresponding to |param_name| for the
-// specified field trial, based on its selected group. If the field trial does
-// not exist or the specified parameter does not exist, returns an empty
-// string. Calling this function will result in the field trial being marked as
-// active if found (i.e. group() will be called on it), if it wasn't already.
-// Thread safe.
-BASE_EXPORT std::string GetFieldTrialParamValue(const std::string& trial_name,
-                                                const std::string& param_name);
-
-// Retrieves a specific parameter value corresponding to |param_name| for the
-// field trial associated with the specified |feature|. A feature is associated
-// with at most one field trial and selected group. See base/feature_list.h for
-// more information on features. If the feature is not enabled, or the
-// specified parameter does not exist, returns an empty string. Calling this
-// function will result in the associated field trial being marked as active if
-// found (i.e. group() will be called on it), if it wasn't already. Thread safe.
-BASE_EXPORT std::string GetFieldTrialParamValueByFeature(
-    const base::Feature& feature,
-    const std::string& param_name);
-
-// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
-// string value into an int using base::StringToInt() and returns it, if
-// successful. Otherwise, it returns |default_value|. If the string value is not
-// empty and the conversion does not succeed, it produces a warning to LOG.
-BASE_EXPORT int GetFieldTrialParamByFeatureAsInt(const base::Feature& feature,
-                                                 const std::string& param_name,
-                                                 int default_value);
-
-// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
-// string value into a double using base::StringToDouble() and returns it, if
-// successful. Otherwise, it returns |default_value|. If the string value is not
-// empty and the conversion does not succeed, it produces a warning to LOG.
-BASE_EXPORT double GetFieldTrialParamByFeatureAsDouble(
-    const base::Feature& feature,
-    const std::string& param_name,
-    double default_value);
-
-// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
-// string value into a boolean and returns it, if successful. Otherwise, it
-// returns |default_value|. The only string representations accepted here are
-// "true" and "false". If the string value is not empty and the conversion does
-// not succeed, it produces a warning to LOG.
-BASE_EXPORT bool GetFieldTrialParamByFeatureAsBool(
-    const base::Feature& feature,
-    const std::string& param_name,
-    bool default_value);
-
-// Shared declaration for various FeatureParam<T> types.
-//
-// This template is defined for the following types T:
-//   bool
-//   int
-//   double
-//   std::string
-//   enum types
-//
-// See the individual definitions below for the appropriate interfaces.
-// Attempting to use it with any other type is a compile error.
-template <typename T, bool IsEnum = std::is_enum<T>::value>
-struct FeatureParam {
-  // Prevent use of FeatureParam<> with unsupported types (e.g. void*). Uses T
-  // in its definition so that evaluation is deferred until the template is
-  // instantiated.
-  static_assert(!std::is_same<T, T>::value, "unsupported FeatureParam<> type");
-};
-
-// Declares a string-valued parameter. Example:
-//
-//     constexpr FeatureParam<std::string> kAssistantName{
-//         &kAssistantFeature, "assistant_name", "HAL"};
-//
-// If the feature is not set, or set to the empty string, then Get() will return
-// the default value.
-template <>
-struct FeatureParam<std::string> {
-  constexpr FeatureParam(const Feature* feature,
-                         const char* name,
-                         const char* default_value)
-      : feature(feature), name(name), default_value(default_value) {}
-
-  BASE_EXPORT std::string Get() const;
-
-  const Feature* const feature;
-  const char* const name;
-  const char* const default_value;
-};
-
-// Declares a double-valued parameter. Example:
-//
-//     constexpr FeatureParam<double> kAssistantTriggerThreshold{
-//         &kAssistantFeature, "trigger_threshold", 0.10};
-//
-// If the feature is not set, or set to an invalid double value, then Get() will
-// return the default value.
-template <>
-struct FeatureParam<double> {
-  constexpr FeatureParam(const Feature* feature,
-                         const char* name,
-                         double default_value)
-      : feature(feature), name(name), default_value(default_value) {}
-
-  BASE_EXPORT double Get() const;
-
-  const Feature* const feature;
-  const char* const name;
-  const double default_value;
-};
-
-// Declares an int-valued parameter. Example:
-//
-//     constexpr FeatureParam<int> kAssistantParallelism{
-//         &kAssistantFeature, "parallelism", 4};
-//
-// If the feature is not set, or set to an invalid int value, then Get() will
-// return the default value.
-template <>
-struct FeatureParam<int> {
-  constexpr FeatureParam(const Feature* feature,
-                         const char* name,
-                         int default_value)
-      : feature(feature), name(name), default_value(default_value) {}
-
-  BASE_EXPORT int Get() const;
-
-  const Feature* const feature;
-  const char* const name;
-  const int default_value;
-};
-
-// Declares a bool-valued parameter. Example:
-//
-//     constexpr FeatureParam<bool> kAssistantIsHelpful{
-//         &kAssistantFeature, "is_helpful", true};
-//
-// If the feature is not set, or set to a value other than "true" or "false",
-// then Get() will return the default value.
-template <>
-struct FeatureParam<bool> {
-  constexpr FeatureParam(const Feature* feature,
-                         const char* name,
-                         bool default_value)
-      : feature(feature), name(name), default_value(default_value) {}
-
-  BASE_EXPORT bool Get() const;
-
-  const Feature* const feature;
-  const char* const name;
-  const bool default_value;
-};
-
-BASE_EXPORT void LogInvalidEnumValue(const Feature& feature,
-                                     const std::string& param_name,
-                                     const std::string& value_as_string,
-                                     int default_value_as_int);
-
-// Feature param declaration for an enum, with associated options. Example:
-//
-//     constexpr FeatureParam<ShapeEnum>::Option kShapeParamOptions[] = {
-//         {SHAPE_CIRCLE, "circle"},
-//         {SHAPE_CYLINDER, "cylinder"},
-//         {SHAPE_PAPERCLIP, "paperclip"}};
-//     constexpr FeatureParam<ShapeEnum> kAssistantShapeParam{
-//         &kAssistantFeature, "shape", SHAPE_CIRCLE, &kShapeParamOptions};
-//
-// With this declaration, the parameter may be set to "circle", "cylinder", or
-// "paperclip", and that will be translated to one of the three enum values. By
-// default, or if the param is set to an unknown value, the parameter will be
-// assumed to be SHAPE_CIRCLE.
-template <typename Enum>
-struct FeatureParam<Enum, true> {
-  struct Option {
-    constexpr Option(Enum value, const char* name) : value(value), name(name) {}
-
-    const Enum value;
-    const char* const name;
-  };
-
-  template <size_t option_count>
-  constexpr FeatureParam(const Feature* feature,
-                         const char* name,
-                         const Enum default_value,
-                         const Option (*options)[option_count])
-      : feature(feature),
-        name(name),
-        default_value(default_value),
-        options(*options),
-        option_count(option_count) {
-    static_assert(option_count >= 1, "FeatureParam<enum> has no options");
-  }
-
-  Enum Get() const {
-    std::string value = GetFieldTrialParamValueByFeature(*feature, name);
-    if (value.empty())
-      return default_value;
-    for (size_t i = 0; i < option_count; ++i) {
-      if (value == options[i].name)
-        return options[i].value;
-    }
-    LogInvalidEnumValue(*feature, name, value, static_cast<int>(default_value));
-    return default_value;
-  }
-
-  const base::Feature* const feature;
-  const char* const name;
-  const Enum default_value;
-  const Option* const options;
-  const size_t option_count;
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_FIELD_TRIAL_PARAMS_H_
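Pulling the header's scattered examples together: a hedged sketch of declaring
and reading FeatureParam<> values, including the enum form. All names here are
hypothetical.

#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"

const base::Feature kAssistantFeature{"Assistant",
                                      base::FEATURE_DISABLED_BY_DEFAULT};

enum ShapeEnum { SHAPE_CIRCLE, SHAPE_CYLINDER };
constexpr base::FeatureParam<ShapeEnum>::Option kShapeOptions[] = {
    {SHAPE_CIRCLE, "circle"}, {SHAPE_CYLINDER, "cylinder"}};
constexpr base::FeatureParam<ShapeEnum> kShapeParam{
    &kAssistantFeature, "shape", SHAPE_CIRCLE, &kShapeOptions};
constexpr base::FeatureParam<bool> kIsHelpful{
    &kAssistantFeature, "is_helpful", true};

void UseParams() {
  // SHAPE_CIRCLE unless the param is set to "cylinder".
  ShapeEnum shape = kShapeParam.Get();
  bool helpful = kIsHelpful.Get();
  (void)shape;
  (void)helpful;
}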
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
deleted file mode 100644
index 07e09cf..0000000
--- a/base/metrics/histogram.cc
+++ /dev/null
@@ -1,1315 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Histogram is an object that aggregates statistics and can summarize them in
-// various forms, including ASCII graphs, HTML, and numeric vectors (one count
-// per aggregating bucket).
-// See header file for details and examples.
-
-#include "base/metrics/histogram.h"
-
-#include <inttypes.h>
-#include <limits.h>
-#include <math.h>
-
-#include <algorithm>
-#include <string>
-#include <utility>
-
-#include "base/compiler_specific.h"
-#include "base/debug/alias.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/metrics/dummy_histogram.h"
-#include "base/metrics/histogram_functions.h"
-#include "base/metrics/metrics_hashes.h"
-#include "base/metrics/persistent_histogram_allocator.h"
-#include "base/metrics/persistent_memory_allocator.h"
-#include "base/metrics/sample_vector.h"
-#include "base/metrics/statistics_recorder.h"
-#include "base/pickle.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/synchronization/lock.h"
-#include "base/sys_info.h"
-#include "base/values.h"
-#include "build_config.h"
-
-namespace base {
-
-namespace {
-
-bool ReadHistogramArguments(PickleIterator* iter,
-                            std::string* histogram_name,
-                            int* flags,
-                            int* declared_min,
-                            int* declared_max,
-                            uint32_t* bucket_count,
-                            uint32_t* range_checksum) {
-  if (!iter->ReadString(histogram_name) ||
-      !iter->ReadInt(flags) ||
-      !iter->ReadInt(declared_min) ||
-      !iter->ReadInt(declared_max) ||
-      !iter->ReadUInt32(bucket_count) ||
-      !iter->ReadUInt32(range_checksum)) {
-    DLOG(ERROR) << "Pickle error decoding Histogram: " << *histogram_name;
-    return false;
-  }
-
-  // Since these fields may have come from an untrusted renderer, do additional
-  // checks above and beyond those in Histogram::Initialize()
-  if (*declared_max <= 0 ||
-      *declared_min <= 0 ||
-      *declared_max < *declared_min ||
-      INT_MAX / sizeof(HistogramBase::Count) <= *bucket_count ||
-      *bucket_count < 2) {
-    DLOG(ERROR) << "Values error decoding Histogram: " << histogram_name;
-    return false;
-  }
-
-  // We use the arguments to find or create the local version of the histogram
-  // in this process, so we need to clear any IPC flag.
-  *flags &= ~HistogramBase::kIPCSerializationSourceFlag;
-
-  return true;
-}
-
-bool ValidateRangeChecksum(const HistogramBase& histogram,
-                           uint32_t range_checksum) {
-  // Normally, |histogram| should have type HISTOGRAM or a type derived from
-  // it.
-  // However, if it's expired, it will actually be a DUMMY_HISTOGRAM.
-  // Skip the checks in that case.
-  if (histogram.GetHistogramType() == DUMMY_HISTOGRAM)
-    return true;
-  const Histogram& casted_histogram =
-      static_cast<const Histogram&>(histogram);
-
-  return casted_histogram.bucket_ranges()->checksum() == range_checksum;
-}
-
-}  // namespace
-
-typedef HistogramBase::Count Count;
-typedef HistogramBase::Sample Sample;
-
-// static
-const uint32_t Histogram::kBucketCount_MAX = 16384u;
-
-class Histogram::Factory {
- public:
-  Factory(const std::string& name,
-          HistogramBase::Sample minimum,
-          HistogramBase::Sample maximum,
-          uint32_t bucket_count,
-          int32_t flags)
-    : Factory(name, HISTOGRAM, minimum, maximum, bucket_count, flags) {}
-
-  // Create histogram based on construction parameters. Caller takes
-  // ownership of the returned object.
-  HistogramBase* Build();
-
- protected:
-  Factory(const std::string& name,
-          HistogramType histogram_type,
-          HistogramBase::Sample minimum,
-          HistogramBase::Sample maximum,
-          uint32_t bucket_count,
-          int32_t flags)
-    : name_(name),
-      histogram_type_(histogram_type),
-      minimum_(minimum),
-      maximum_(maximum),
-      bucket_count_(bucket_count),
-      flags_(flags) {}
-
-  // Create a BucketRanges structure appropriate for this histogram.
-  virtual BucketRanges* CreateRanges() {
-    BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
-    Histogram::InitializeBucketRanges(minimum_, maximum_, ranges);
-    return ranges;
-  }
-
-  // Allocate the correct Histogram object off the heap (in case persistent
-  // memory is not available).
-  virtual std::unique_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) {
-    return WrapUnique(
-        new Histogram(GetPermanentName(name_), minimum_, maximum_, ranges));
-  }
-
-  // Perform any required datafill on the just-created histogram.  If
-  // overridden, be sure to call the "super" version -- this method may not
-  // always remain empty.
-  virtual void FillHistogram(HistogramBase* histogram) {}
-
-  // These values are protected (instead of private) because they need to
-  // be accessible to methods of sub-classes in order to avoid passing
-  // unnecessary parameters everywhere.
-  const std::string& name_;
-  const HistogramType histogram_type_;
-  HistogramBase::Sample minimum_;
-  HistogramBase::Sample maximum_;
-  uint32_t bucket_count_;
-  int32_t flags_;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(Factory);
-};
-
-HistogramBase* Histogram::Factory::Build() {
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name_);
-  if (!histogram) {
-    // TODO(gayane): |HashMetricName()| is called again in Histogram
-    // constructor. Refactor code to avoid the additional call.
-    bool should_record =
-        StatisticsRecorder::ShouldRecordHistogram(HashMetricName(name_));
-    if (!should_record)
-      return DummyHistogram::GetInstance();
-    // To avoid racy destruction at shutdown, the following will be leaked.
-    const BucketRanges* created_ranges = CreateRanges();
-    const BucketRanges* registered_ranges =
-        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(created_ranges);
-
-    // In most cases, the bucket-count, minimum, and maximum values are known
-    // when the code is written and so are passed in explicitly. In other
-    // cases (such as with a CustomHistogram), they are calculated dynamically
-    // at run-time. In the latter case, those ctor parameters are zero and the
-    // real values are extracted from the result of CreateRanges().
-    if (bucket_count_ == 0) {
-      bucket_count_ = static_cast<uint32_t>(registered_ranges->bucket_count());
-      minimum_ = registered_ranges->range(1);
-      maximum_ = registered_ranges->range(bucket_count_ - 1);
-    }
-    DCHECK_EQ(minimum_, registered_ranges->range(1));
-    DCHECK_EQ(maximum_, registered_ranges->range(bucket_count_ - 1));
-
-    // Try to create the histogram using a "persistent" allocator. As of
-    // 2016-02-25, the availability of such is controlled by a base::Feature
-    // that is off by default. If the allocator doesn't exist or if
-    // allocating from it fails, code below will allocate the histogram from
-    // the process heap.
-    PersistentHistogramAllocator::Reference histogram_ref = 0;
-    std::unique_ptr<HistogramBase> tentative_histogram;
-    PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
-    if (allocator) {
-      tentative_histogram = allocator->AllocateHistogram(
-          histogram_type_,
-          name_,
-          minimum_,
-          maximum_,
-          registered_ranges,
-          flags_,
-          &histogram_ref);
-    }
-
-    // Handle the case where no persistent allocator is present or the
-    // persistent allocation fails (perhaps because it is full).
-    if (!tentative_histogram) {
-      DCHECK(!histogram_ref);  // Should never have been set.
-      DCHECK(!allocator);  // Shouldn't have failed.
-      flags_ &= ~HistogramBase::kIsPersistent;
-      tentative_histogram = HeapAlloc(registered_ranges);
-      tentative_histogram->SetFlags(flags_);
-    }
-
-    FillHistogram(tentative_histogram.get());
-
-    // Register this histogram with the StatisticsRecorder. Keep a copy of
-    // the pointer value to tell later whether the locally created histogram
-    // was registered or deleted. The type is "void" because it could point
-    // to released memory after the following line.
-    const void* tentative_histogram_ptr = tentative_histogram.get();
-    histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
-        tentative_histogram.release());
-
-    // Persistent histograms need some follow-up processing.
-    if (histogram_ref) {
-      allocator->FinalizeHistogram(histogram_ref,
-                                   histogram == tentative_histogram_ptr);
-    }
-  }
-
-  if (histogram_type_ != histogram->GetHistogramType() ||
-      (bucket_count_ != 0 && !histogram->HasConstructionArguments(
-                                 minimum_, maximum_, bucket_count_))) {
-    // The construction arguments do not match the existing histogram.  This can
-    // come about if an extension updates in the middle of a Chrome run and has
-    // changed one of them, or simply from bad code within Chrome itself.  A NULL
-    // return would cause Chrome to crash; better to just record the mismatch
-    // for later analysis.
-    UmaHistogramSparse("Histogram.MismatchedConstructionArguments",
-                       static_cast<Sample>(HashMetricName(name_)));
-    DLOG(ERROR) << "Histogram " << name_
-                << " has mismatched construction arguments";
-    return DummyHistogram::GetInstance();
-  }
-  return histogram;
-}
-
-HistogramBase* Histogram::FactoryGet(const std::string& name,
-                                     Sample minimum,
-                                     Sample maximum,
-                                     uint32_t bucket_count,
-                                     int32_t flags) {
-  bool valid_arguments =
-      InspectConstructionArguments(name, &minimum, &maximum, &bucket_count);
-  DCHECK(valid_arguments);
-
-  return Factory(name, minimum, maximum, bucket_count, flags).Build();
-}
-
-HistogramBase* Histogram::FactoryTimeGet(const std::string& name,
-                                         TimeDelta minimum,
-                                         TimeDelta maximum,
-                                         uint32_t bucket_count,
-                                         int32_t flags) {
-  return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
-                    static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
-                    flags);
-}
-
-HistogramBase* Histogram::FactoryMicrosecondsTimeGet(const std::string& name,
-                                                     TimeDelta minimum,
-                                                     TimeDelta maximum,
-                                                     uint32_t bucket_count,
-                                                     int32_t flags) {
-  return FactoryGet(name, static_cast<Sample>(minimum.InMicroseconds()),
-                    static_cast<Sample>(maximum.InMicroseconds()), bucket_count,
-                    flags);
-}
-
-HistogramBase* Histogram::FactoryGet(const char* name,
-                                     Sample minimum,
-                                     Sample maximum,
-                                     uint32_t bucket_count,
-                                     int32_t flags) {
-  return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
-}
-
-HistogramBase* Histogram::FactoryTimeGet(const char* name,
-                                         TimeDelta minimum,
-                                         TimeDelta maximum,
-                                         uint32_t bucket_count,
-                                         int32_t flags) {
-  return FactoryTimeGet(std::string(name), minimum, maximum, bucket_count,
-                        flags);
-}
-
-HistogramBase* Histogram::FactoryMicrosecondsTimeGet(const char* name,
-                                                     TimeDelta minimum,
-                                                     TimeDelta maximum,
-                                                     uint32_t bucket_count,
-                                                     int32_t flags) {
-  return FactoryMicrosecondsTimeGet(std::string(name), minimum, maximum,
-                                    bucket_count, flags);
-}
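A hedged usage sketch of the factory functions above: get-or-create a
histogram and record a sample. The metric name and bounds are hypothetical;
kUmaTargetedHistogramFlag is the usual flag for UMA-reported metrics.

#include "base/metrics/histogram.h"

void RecordLatencySample(int latency_ms) {
  // Returns the existing histogram with this name if the construction
  // arguments match, or creates and registers a new one.
  base::HistogramBase* histogram = base::Histogram::FactoryGet(
      "MyApp.Latency", 1, 10000, 50,
      base::HistogramBase::kUmaTargetedHistogramFlag);
  histogram->Add(latency_ms);
}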
-
-std::unique_ptr<HistogramBase> Histogram::PersistentCreate(
-    const char* name,
-    Sample minimum,
-    Sample maximum,
-    const BucketRanges* ranges,
-    const DelayedPersistentAllocation& counts,
-    const DelayedPersistentAllocation& logged_counts,
-    HistogramSamples::Metadata* meta,
-    HistogramSamples::Metadata* logged_meta) {
-  return WrapUnique(new Histogram(name, minimum, maximum, ranges, counts,
-                                  logged_counts, meta, logged_meta));
-}
-
-// Calculate what range of values are held in each bucket.
-// We have to be careful that we don't pick a ratio between starting points in
-// consecutive buckets that is so small that the integer bounds are the same
-// (effectively making one bucket get no values).  We need to avoid:
-//   ranges(i) == ranges(i + 1)
-// To avoid that, we use a fine-grained bucket width as far as we need to,
-// until we get a ratio that moves us along at least 2 units at a time.  From
-// that bucket onward we use the exponential growth of buckets.
-//
-// static
-void Histogram::InitializeBucketRanges(Sample minimum,
-                                       Sample maximum,
-                                       BucketRanges* ranges) {
-  double log_max = log(static_cast<double>(maximum));
-  double log_ratio;
-  double log_next;
-  size_t bucket_index = 1;
-  Sample current = minimum;
-  ranges->set_range(bucket_index, current);
-  size_t bucket_count = ranges->bucket_count();
-  while (bucket_count > ++bucket_index) {
-    double log_current;
-    log_current = log(static_cast<double>(current));
-    // Calculate the count'th root of the range.
-    log_ratio = (log_max - log_current) / (bucket_count - bucket_index);
-    // See where the next bucket would start.
-    log_next = log_current + log_ratio;
-    Sample next;
-    next = static_cast<int>(std::round(exp(log_next)));
-    if (next > current)
-      current = next;
-    else
-      ++current;  // Just do a narrow bucket, and keep trying.
-    ranges->set_range(bucket_index, current);
-  }
-  ranges->set_range(ranges->bucket_count(), HistogramBase::kSampleType_MAX);
-  ranges->ResetChecksum();
-}
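The same bucketing scheme as InitializeBucketRanges() above, compressed into a
standalone sketch so the narrow-bucket fallback is easy to see; it prints the
boundaries a 1..100 histogram with 10 buckets would get (kSampleType_MAX is
stood in for by maximum + 1).

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  const int minimum = 1, maximum = 100;
  const size_t bucket_count = 10;
  std::vector<int> ranges(bucket_count + 1, 0);  // ranges[0] stays 0.
  const double log_max = log(static_cast<double>(maximum));
  size_t bucket_index = 1;
  int current = minimum;
  ranges[bucket_index] = current;
  while (bucket_count > ++bucket_index) {
    const double log_current = log(static_cast<double>(current));
    // The (bucket_count - bucket_index)'th root of the remaining range.
    const double log_ratio =
        (log_max - log_current) / (bucket_count - bucket_index);
    const int next =
        static_cast<int>(std::round(exp(log_current + log_ratio)));
    // Narrow-bucket fallback: always advance by at least one unit.
    current = (next > current) ? next : current + 1;
    ranges[bucket_index] = current;
  }
  ranges[bucket_count] = maximum + 1;  // Stand-in for kSampleType_MAX.
  for (size_t i = 0; i <= bucket_count; ++i)
    printf("ranges[%zu] = %d\n", i, ranges[i]);
  return 0;
}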
-
-// static
-const int Histogram::kCommonRaceBasedCountMismatch = 5;
-
-uint32_t Histogram::FindCorruption(const HistogramSamples& samples) const {
-  int inconsistencies = NO_INCONSISTENCIES;
-  Sample previous_range = -1;  // Bottom range is always 0.
-  for (uint32_t index = 0; index < bucket_count(); ++index) {
-    int new_range = ranges(index);
-    if (previous_range >= new_range)
-      inconsistencies |= BUCKET_ORDER_ERROR;
-    previous_range = new_range;
-  }
-
-  if (!bucket_ranges()->HasValidChecksum())
-    inconsistencies |= RANGE_CHECKSUM_ERROR;
-
-  int64_t delta64 = samples.redundant_count() - samples.TotalCount();
-  if (delta64 != 0) {
-    int delta = static_cast<int>(delta64);
-    if (delta != delta64)
-      delta = INT_MAX;  // Flag all giant errors as INT_MAX.
-    if (delta > 0) {
-      if (delta > kCommonRaceBasedCountMismatch)
-        inconsistencies |= COUNT_HIGH_ERROR;
-    } else {
-      DCHECK_GT(0, delta);
-      if (-delta > kCommonRaceBasedCountMismatch)
-        inconsistencies |= COUNT_LOW_ERROR;
-    }
-  }
-  return inconsistencies;
-}
-
-const BucketRanges* Histogram::bucket_ranges() const {
-  return unlogged_samples_->bucket_ranges();
-}
-
-Sample Histogram::declared_min() const {
-  const BucketRanges* ranges = bucket_ranges();
-  if (ranges->bucket_count() < 2)
-    return -1;
-  return ranges->range(1);
-}
-
-Sample Histogram::declared_max() const {
-  const BucketRanges* ranges = bucket_ranges();
-  if (ranges->bucket_count() < 2)
-    return -1;
-  return ranges->range(ranges->bucket_count() - 1);
-}
-
-Sample Histogram::ranges(uint32_t i) const {
-  return bucket_ranges()->range(i);
-}
-
-uint32_t Histogram::bucket_count() const {
-  return static_cast<uint32_t>(bucket_ranges()->bucket_count());
-}
-
-// static
-bool Histogram::InspectConstructionArguments(StringPiece name,
-                                             Sample* minimum,
-                                             Sample* maximum,
-                                             uint32_t* bucket_count) {
-  // Defensive code for backward compatibility.
-  if (*minimum < 1) {
-    DVLOG(1) << "Histogram: " << name << " has bad minimum: " << *minimum;
-    *minimum = 1;
-  }
-  if (*maximum >= kSampleType_MAX) {
-    DVLOG(1) << "Histogram: " << name << " has bad maximum: " << *maximum;
-    *maximum = kSampleType_MAX - 1;
-  }
-  if (*bucket_count >= kBucketCount_MAX) {
-    DVLOG(1) << "Histogram: " << name << " has bad bucket_count: "
-             << *bucket_count;
-    *bucket_count = kBucketCount_MAX - 1;
-  }
-
-  bool check_okay = true;
-
-  if (*minimum > *maximum) {
-    check_okay = false;
-    std::swap(*minimum, *maximum);
-  }
-  if (*maximum == *minimum) {
-    check_okay = false;
-    *maximum = *minimum + 1;
-  }
-  if (*bucket_count < 3) {
-    check_okay = false;
-    *bucket_count = 3;
-  }
-  // Very high bucket counts are wasteful. Use a sparse histogram instead.
-  // A value of 10002 equals a user-supplied 10k plus 2 overflow buckets.
-  constexpr uint32_t kMaxBucketCount = 10002;
-  if (*bucket_count > kMaxBucketCount) {
-    check_okay = false;
-    *bucket_count = kMaxBucketCount;
-  }
-  if (*bucket_count > static_cast<uint32_t>(*maximum - *minimum + 2)) {
-    check_okay = false;
-    *bucket_count = static_cast<uint32_t>(*maximum - *minimum + 2);
-  }
-
-  if (!check_okay) {
-    UmaHistogramSparse("Histogram.BadConstructionArguments",
-                       static_cast<Sample>(HashMetricName(name)));
-  }
-
-  return check_okay;
-}
-
-uint64_t Histogram::name_hash() const {
-  return unlogged_samples_->id();
-}
-
-HistogramType Histogram::GetHistogramType() const {
-  return HISTOGRAM;
-}
-
-bool Histogram::HasConstructionArguments(Sample expected_minimum,
-                                         Sample expected_maximum,
-                                         uint32_t expected_bucket_count) const {
-  return (expected_bucket_count == bucket_count() &&
-          expected_minimum == declared_min() &&
-          expected_maximum == declared_max());
-}
-
-void Histogram::Add(int value) {
-  AddCount(value, 1);
-}
-
-void Histogram::AddCount(int value, int count) {
-  DCHECK_EQ(0, ranges(0));
-  DCHECK_EQ(kSampleType_MAX, ranges(bucket_count()));
-
-  if (value > kSampleType_MAX - 1)
-    value = kSampleType_MAX - 1;
-  if (value < 0)
-    value = 0;
-  if (count <= 0) {
-    NOTREACHED();
-    return;
-  }
-  unlogged_samples_->Accumulate(value, count);
-
-  FindAndRunCallback(value);
-}
-
-std::unique_ptr<HistogramSamples> Histogram::SnapshotSamples() const {
-  return SnapshotAllSamples();
-}
-
-std::unique_ptr<HistogramSamples> Histogram::SnapshotDelta() {
-#if DCHECK_IS_ON()
-  DCHECK(!final_delta_created_);
-#endif
-
-  // The code below has subtle thread-safety guarantees! All changes to
-  // the underlying SampleVectors use atomic integer operations, which guarantee
-  // eventual consistency, but do not guarantee full synchronization between
-  // different entries in the SampleVector. In particular, this means that
-  // concurrent updates to the histogram might result in the reported sum not
-  // matching the individual bucket counts; or there being some buckets that are
-  // logically updated "together", but end up being only partially updated when
-  // a snapshot is captured. Note that this is why it's important to subtract
-  // exactly the snapshotted unlogged samples, rather than simply resetting the
-  // vector: this way, the next snapshot will include any concurrent updates
-  // missed by the current snapshot.
-
-  std::unique_ptr<HistogramSamples> snapshot = SnapshotUnloggedSamples();
-  unlogged_samples_->Subtract(*snapshot);
-  logged_samples_->Add(*snapshot);
-
-  return snapshot;
-}
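The unlogged/logged bookkeeping above, reduced to a toy model with a single
plain bucket and no atomics, to show why the snapshot is subtracted rather
than the vector simply being reset.

struct ToyHistogram {
  int unlogged = 0;  // Samples not yet reported.
  int logged = 0;    // Samples already reported.

  void Add(int count) { unlogged += count; }

  int SnapshotDelta() {
    int snapshot = unlogged;  // Capture what we will report.
    unlogged -= snapshot;     // Subtract exactly the snapshot; don't zero.
    logged += snapshot;       // The cumulative total lives in |logged|.
    return snapshot;
  }
};

An Add() that lands between the capture and the subtraction is not lost in
this scheme; it stays in |unlogged| and shows up in the next delta.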
-
-std::unique_ptr<HistogramSamples> Histogram::SnapshotFinalDelta() const {
-#if DCHECK_IS_ON()
-  DCHECK(!final_delta_created_);
-  final_delta_created_ = true;
-#endif
-
-  return SnapshotUnloggedSamples();
-}
-
-void Histogram::AddSamples(const HistogramSamples& samples) {
-  unlogged_samples_->Add(samples);
-}
-
-bool Histogram::AddSamplesFromPickle(PickleIterator* iter) {
-  return unlogged_samples_->AddFromPickle(iter);
-}
-
-// The following methods provide a graphical histogram display.
-void Histogram::WriteHTMLGraph(std::string* output) const {
-  // TBD(jar) Write a nice HTML bar chart, with divs and mouse-overs, etc.
-  output->append("<PRE>");
-  WriteAsciiImpl(true, "<br>", output);
-  output->append("</PRE>");
-}
-
-void Histogram::WriteAscii(std::string* output) const {
-  WriteAsciiImpl(true, "\n", output);
-}
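For completeness, a brief sketch of driving the ASCII output above; any
HistogramBase pointer obtained from a factory function would do.

#include <string>

#include "base/metrics/histogram.h"

void DumpHistogram(base::HistogramBase* histogram) {
  std::string graph;
  histogram->WriteAscii(&graph);  // One row per bucket, plus a header line.
  // |graph| can now be logged or shown on a debug page.
}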
-
-void Histogram::ValidateHistogramContents() const {
-  CHECK(unlogged_samples_);
-  CHECK(unlogged_samples_->bucket_ranges());
-  CHECK(logged_samples_);
-  CHECK(logged_samples_->bucket_ranges());
-#if !defined(OS_NACL)
-  if (0U == logged_samples_->id() && (flags() & kIsPersistent)) {
-    // ID should never be zero. If it is, then it's probably because the
-    // entire memory page was cleared. Check that this is true.
-    // TODO(bcwhite): Remove this.
-    // https://bugs.chromium.org/p/chromium/issues/detail?id=836875
-    size_t page_size = SysInfo::VMAllocationGranularity();
-    if (page_size == 0)
-      page_size = 1024;
-    const int* address = reinterpret_cast<const int*>(
-        reinterpret_cast<uintptr_t>(logged_samples_->meta()) &
-        ~(page_size - 1));
-    // Check a couple places so there is evidence in a crash report as to
-    // where it was non-zero.
-    CHECK_EQ(0, address[0]);
-    CHECK_EQ(0, address[1]);
-    CHECK_EQ(0, address[2]);
-    CHECK_EQ(0, address[4]);
-    CHECK_EQ(0, address[8]);
-    CHECK_EQ(0, address[16]);
-    CHECK_EQ(0, address[32]);
-    CHECK_EQ(0, address[64]);
-    CHECK_EQ(0, address[128]);
-    CHECK_EQ(0, address[256]);
-    CHECK_EQ(0, address[512]);
-    // Now check every address.
-    for (size_t i = 0; i < page_size / sizeof(int); ++i)
-      CHECK_EQ(0, address[i]);
-  }
-#endif
-  CHECK_NE(0U, logged_samples_->id());
-}
-
-void Histogram::SerializeInfoImpl(Pickle* pickle) const {
-  DCHECK(bucket_ranges()->HasValidChecksum());
-  pickle->WriteString(histogram_name());
-  pickle->WriteInt(flags());
-  pickle->WriteInt(declared_min());
-  pickle->WriteInt(declared_max());
-  pickle->WriteUInt32(bucket_count());
-  pickle->WriteUInt32(bucket_ranges()->checksum());
-}
-
-// TODO(bcwhite): Remove minimum/maximum parameters from here and the call
-// chain.
-Histogram::Histogram(const char* name,
-                     Sample minimum,
-                     Sample maximum,
-                     const BucketRanges* ranges)
-    : HistogramBase(name) {
-  DCHECK(ranges) << name << ": " << minimum << "-" << maximum;
-  unlogged_samples_.reset(new SampleVector(HashMetricName(name), ranges));
-  logged_samples_.reset(new SampleVector(unlogged_samples_->id(), ranges));
-}
-
-Histogram::Histogram(const char* name,
-                     Sample minimum,
-                     Sample maximum,
-                     const BucketRanges* ranges,
-                     const DelayedPersistentAllocation& counts,
-                     const DelayedPersistentAllocation& logged_counts,
-                     HistogramSamples::Metadata* meta,
-                     HistogramSamples::Metadata* logged_meta)
-    : HistogramBase(name) {
-  DCHECK(ranges) << name << ": " << minimum << "-" << maximum;
-  unlogged_samples_.reset(
-      new PersistentSampleVector(HashMetricName(name), ranges, meta, counts));
-  logged_samples_.reset(new PersistentSampleVector(
-      unlogged_samples_->id(), ranges, logged_meta, logged_counts));
-}
-
-Histogram::~Histogram() = default;
-
-bool Histogram::PrintEmptyBucket(uint32_t index) const {
-  return true;
-}
-
-// Use the actual bucket widths (like a linear histogram) until the widths get
-// over some transition value, and then use that transition width.  Exponential
-// bucket widths grow so quickly (and we don't expect many entries in the large
-// buckets) that we need this cap to keep the graph legible and to avoid
-// buckets with zero graphical height.
-double Histogram::GetBucketSize(Count current, uint32_t i) const {
-  DCHECK_GT(ranges(i + 1), ranges(i));
-  static const double kTransitionWidth = 5;
-  double denominator = ranges(i + 1) - ranges(i);
-  if (denominator > kTransitionWidth)
-    denominator = kTransitionWidth;  // Stop trying to normalize.
-  return current / denominator;
-}
-
-const std::string Histogram::GetAsciiBucketRange(uint32_t i) const {
-  return GetSimpleAsciiBucketRange(ranges(i));
-}
-
-//------------------------------------------------------------------------------
-// Private methods
-
-// static
-HistogramBase* Histogram::DeserializeInfoImpl(PickleIterator* iter) {
-  std::string histogram_name;
-  int flags;
-  int declared_min;
-  int declared_max;
-  uint32_t bucket_count;
-  uint32_t range_checksum;
-
-  if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
-                              &declared_max, &bucket_count, &range_checksum)) {
-    return nullptr;
-  }
-
-  // Find or create the local version of the histogram in this process.
-  HistogramBase* histogram = Histogram::FactoryGet(
-      histogram_name, declared_min, declared_max, bucket_count, flags);
-  if (!histogram)
-    return nullptr;
-
-  // The serialized histogram might be corrupted.
-  if (!ValidateRangeChecksum(*histogram, range_checksum))
-    return nullptr;
-
-  return histogram;
-}
-
-std::unique_ptr<SampleVector> Histogram::SnapshotAllSamples() const {
-  std::unique_ptr<SampleVector> samples = SnapshotUnloggedSamples();
-  samples->Add(*logged_samples_);
-  return samples;
-}
-
-std::unique_ptr<SampleVector> Histogram::SnapshotUnloggedSamples() const {
-  std::unique_ptr<SampleVector> samples(
-      new SampleVector(unlogged_samples_->id(), bucket_ranges()));
-  samples->Add(*unlogged_samples_);
-  return samples;
-}
-
-void Histogram::WriteAsciiImpl(bool graph_it,
-                               const std::string& newline,
-                               std::string* output) const {
-  // Get local (stack) copies of all effectively volatile class data so that we
-  // are consistent across our output activities.
-  std::unique_ptr<SampleVector> snapshot = SnapshotAllSamples();
-  Count sample_count = snapshot->TotalCount();
-
-  WriteAsciiHeader(*snapshot, sample_count, output);
-  output->append(newline);
-
-  // Prepare to normalize graphical rendering of bucket contents.
-  double max_size = 0;
-  if (graph_it)
-    max_size = GetPeakBucketSize(*snapshot);
-
-  // Calculate space needed to print bucket range numbers.  Leave room to print
-  // nearly the largest bucket range without sliding over the histogram.
-  uint32_t largest_non_empty_bucket = bucket_count() - 1;
-  while (0 == snapshot->GetCountAtIndex(largest_non_empty_bucket)) {
-    if (0 == largest_non_empty_bucket)
-      break;  // All buckets are empty.
-    --largest_non_empty_bucket;
-  }
-
-  // Calculate largest print width needed for any of our bucket range displays.
-  size_t print_width = 1;
-  for (uint32_t i = 0; i < bucket_count(); ++i) {
-    if (snapshot->GetCountAtIndex(i)) {
-      size_t width = GetAsciiBucketRange(i).size() + 1;
-      if (width > print_width)
-        print_width = width;
-    }
-  }
-
-  int64_t remaining = sample_count;
-  int64_t past = 0;
-  // Output the actual histogram graph.
-  for (uint32_t i = 0; i < bucket_count(); ++i) {
-    Count current = snapshot->GetCountAtIndex(i);
-    if (!current && !PrintEmptyBucket(i))
-      continue;
-    remaining -= current;
-    std::string range = GetAsciiBucketRange(i);
-    output->append(range);
-    for (size_t j = 0; range.size() + j < print_width + 1; ++j)
-      output->push_back(' ');
-    if (0 == current && i < bucket_count() - 1 &&
-        0 == snapshot->GetCountAtIndex(i + 1)) {
-      while (i < bucket_count() - 1 &&
-             0 == snapshot->GetCountAtIndex(i + 1)) {
-        ++i;
-      }
-      output->append("... ");
-      output->append(newline);
-      continue;  // No reason to plot emptiness.
-    }
-    double current_size = GetBucketSize(current, i);
-    if (graph_it)
-      WriteAsciiBucketGraph(current_size, max_size, output);
-    WriteAsciiBucketContext(past, current, remaining, i, output);
-    output->append(newline);
-    past += current;
-  }
-  DCHECK_EQ(sample_count, past);
-}
-
-double Histogram::GetPeakBucketSize(const SampleVectorBase& samples) const {
-  double max = 0;
-  for (uint32_t i = 0; i < bucket_count() ; ++i) {
-    double current_size = GetBucketSize(samples.GetCountAtIndex(i), i);
-    if (current_size > max)
-      max = current_size;
-  }
-  return max;
-}
-
-void Histogram::WriteAsciiHeader(const SampleVectorBase& samples,
-                                 Count sample_count,
-                                 std::string* output) const {
-  StringAppendF(output, "Histogram: %s recorded %d samples", histogram_name(),
-                sample_count);
-  if (sample_count == 0) {
-    DCHECK_EQ(samples.sum(), 0);
-  } else {
-    double mean = static_cast<double>(samples.sum()) / sample_count;
-    StringAppendF(output, ", mean = %.1f", mean);
-  }
-  if (flags())
-    StringAppendF(output, " (flags = 0x%x)", flags());
-}
-
-void Histogram::WriteAsciiBucketContext(const int64_t past,
-                                        const Count current,
-                                        const int64_t remaining,
-                                        const uint32_t i,
-                                        std::string* output) const {
-  double scaled_sum = (past + current + remaining) / 100.0;
-  WriteAsciiBucketValue(current, scaled_sum, output);
-  if (0 < i) {
-    double percentage = past / scaled_sum;
-    StringAppendF(output, " {%3.1f%%}", percentage);
-  }
-}
-
-void Histogram::GetParameters(DictionaryValue* params) const {
-  params->SetString("type", HistogramTypeToString(GetHistogramType()));
-  params->SetInteger("min", declared_min());
-  params->SetInteger("max", declared_max());
-  params->SetInteger("bucket_count", static_cast<int>(bucket_count()));
-}
-
-void Histogram::GetCountAndBucketData(Count* count,
-                                      int64_t* sum,
-                                      ListValue* buckets) const {
-  std::unique_ptr<SampleVector> snapshot = SnapshotAllSamples();
-  *count = snapshot->TotalCount();
-  *sum = snapshot->sum();
-  uint32_t index = 0;
-  for (uint32_t i = 0; i < bucket_count(); ++i) {
-    Sample count_at_index = snapshot->GetCountAtIndex(i);
-    if (count_at_index > 0) {
-      std::unique_ptr<DictionaryValue> bucket_value(new DictionaryValue());
-      bucket_value->SetInteger("low", ranges(i));
-      if (i != bucket_count() - 1)
-        bucket_value->SetInteger("high", ranges(i + 1));
-      bucket_value->SetInteger("count", count_at_index);
-      buckets->Set(index, std::move(bucket_value));
-      ++index;
-    }
-  }
-}
-
-//------------------------------------------------------------------------------
-// LinearHistogram: This histogram uses a traditional set of evenly spaced
-// buckets.
-//------------------------------------------------------------------------------
-
-class LinearHistogram::Factory : public Histogram::Factory {
- public:
-  Factory(const std::string& name,
-          HistogramBase::Sample minimum,
-          HistogramBase::Sample maximum,
-          uint32_t bucket_count,
-          int32_t flags,
-          const DescriptionPair* descriptions)
-    : Histogram::Factory(name, LINEAR_HISTOGRAM, minimum, maximum,
-                         bucket_count, flags) {
-    descriptions_ = descriptions;
-  }
-
- protected:
-  BucketRanges* CreateRanges() override {
-    BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
-    LinearHistogram::InitializeBucketRanges(minimum_, maximum_, ranges);
-    return ranges;
-  }
-
-  std::unique_ptr<HistogramBase> HeapAlloc(
-      const BucketRanges* ranges) override {
-    return WrapUnique(new LinearHistogram(GetPermanentName(name_), minimum_,
-                                          maximum_, ranges));
-  }
-
-  void FillHistogram(HistogramBase* base_histogram) override {
-    Histogram::Factory::FillHistogram(base_histogram);
-    // Normally, |base_histogram| should have type LINEAR_HISTOGRAM or be
-    // inherited from it. However, if it's expired, it will actually be a
-    // DUMMY_HISTOGRAM. Skip filling in that case.
-    if (base_histogram->GetHistogramType() == DUMMY_HISTOGRAM)
-      return;
-    LinearHistogram* histogram = static_cast<LinearHistogram*>(base_histogram);
-    // Set range descriptions.
-    if (descriptions_) {
-      for (int i = 0; descriptions_[i].description; ++i) {
-        histogram->bucket_description_[descriptions_[i].sample] =
-            descriptions_[i].description;
-      }
-    }
-  }
-
- private:
-  const DescriptionPair* descriptions_;
-
-  DISALLOW_COPY_AND_ASSIGN(Factory);
-};
-
-LinearHistogram::~LinearHistogram() = default;
-
-HistogramBase* LinearHistogram::FactoryGet(const std::string& name,
-                                           Sample minimum,
-                                           Sample maximum,
-                                           uint32_t bucket_count,
-                                           int32_t flags) {
-  return FactoryGetWithRangeDescription(name, minimum, maximum, bucket_count,
-                                        flags, NULL);
-}
-
-HistogramBase* LinearHistogram::FactoryTimeGet(const std::string& name,
-                                               TimeDelta minimum,
-                                               TimeDelta maximum,
-                                               uint32_t bucket_count,
-                                               int32_t flags) {
-  return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
-                    static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
-                    flags);
-}
-
-HistogramBase* LinearHistogram::FactoryGet(const char* name,
-                                           Sample minimum,
-                                           Sample maximum,
-                                           uint32_t bucket_count,
-                                           int32_t flags) {
-  return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
-}
-
-HistogramBase* LinearHistogram::FactoryTimeGet(const char* name,
-                                               TimeDelta minimum,
-                                               TimeDelta maximum,
-                                               uint32_t bucket_count,
-                                               int32_t flags) {
-  return FactoryTimeGet(std::string(name),  minimum, maximum, bucket_count,
-                        flags);
-}
-
-std::unique_ptr<HistogramBase> LinearHistogram::PersistentCreate(
-    const char* name,
-    Sample minimum,
-    Sample maximum,
-    const BucketRanges* ranges,
-    const DelayedPersistentAllocation& counts,
-    const DelayedPersistentAllocation& logged_counts,
-    HistogramSamples::Metadata* meta,
-    HistogramSamples::Metadata* logged_meta) {
-  return WrapUnique(new LinearHistogram(name, minimum, maximum, ranges, counts,
-                                        logged_counts, meta, logged_meta));
-}
-
-HistogramBase* LinearHistogram::FactoryGetWithRangeDescription(
-    const std::string& name,
-    Sample minimum,
-    Sample maximum,
-    uint32_t bucket_count,
-    int32_t flags,
-    const DescriptionPair descriptions[]) {
-  bool valid_arguments = Histogram::InspectConstructionArguments(
-      name, &minimum, &maximum, &bucket_count);
-  DCHECK(valid_arguments);
-
-  return Factory(name, minimum, maximum, bucket_count, flags, descriptions)
-      .Build();
-}
-
-HistogramType LinearHistogram::GetHistogramType() const {
-  return LINEAR_HISTOGRAM;
-}
-
-LinearHistogram::LinearHistogram(const char* name,
-                                 Sample minimum,
-                                 Sample maximum,
-                                 const BucketRanges* ranges)
-    : Histogram(name, minimum, maximum, ranges) {}
-
-LinearHistogram::LinearHistogram(
-    const char* name,
-    Sample minimum,
-    Sample maximum,
-    const BucketRanges* ranges,
-    const DelayedPersistentAllocation& counts,
-    const DelayedPersistentAllocation& logged_counts,
-    HistogramSamples::Metadata* meta,
-    HistogramSamples::Metadata* logged_meta)
-    : Histogram(name,
-                minimum,
-                maximum,
-                ranges,
-                counts,
-                logged_counts,
-                meta,
-                logged_meta) {}
-
-double LinearHistogram::GetBucketSize(Count current, uint32_t i) const {
-  DCHECK_GT(ranges(i + 1), ranges(i));
-  // Adjacent buckets with different widths would have "surprisingly" many (few)
-  // samples in a histogram if we didn't normalize this way.
-  double denominator = ranges(i + 1) - ranges(i);
-  return current / denominator;
-}
-
-const std::string LinearHistogram::GetAsciiBucketRange(uint32_t i) const {
-  int range = ranges(i);
-  BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
-  if (it == bucket_description_.end())
-    return Histogram::GetAsciiBucketRange(i);
-  return it->second;
-}
-
-bool LinearHistogram::PrintEmptyBucket(uint32_t index) const {
-  return bucket_description_.find(ranges(index)) == bucket_description_.end();
-}
-
-// static
-void LinearHistogram::InitializeBucketRanges(Sample minimum,
-                                             Sample maximum,
-                                             BucketRanges* ranges) {
-  double min = minimum;
-  double max = maximum;
-  size_t bucket_count = ranges->bucket_count();
-  for (size_t i = 1; i < bucket_count; ++i) {
-    double linear_range =
-        (min * (bucket_count - 1 - i) + max * (i - 1)) / (bucket_count - 2);
-    ranges->set_range(i, static_cast<Sample>(linear_range + 0.5));
-  }
-  ranges->set_range(ranges->bucket_count(), HistogramBase::kSampleType_MAX);
-  ranges->ResetChecksum();
-}
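-// Worked example (illustrative): with minimum = 1, maximum = 4, and a bucket
-// count of 5, the loop above produces ranges [0, 1, 2, 3, 4, INT_MAX], i.e.
-// an underflow bucket [0, 1), three unit-wide buckets, and an overflow bucket
-// [4, INT_MAX). Mirroring how the factories build ranges:
-//
-//   BucketRanges ranges(5 + 1);
-//   LinearHistogram::InitializeBucketRanges(1, 4, &ranges);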
-
-// static
-HistogramBase* LinearHistogram::DeserializeInfoImpl(PickleIterator* iter) {
-  std::string histogram_name;
-  int flags;
-  int declared_min;
-  int declared_max;
-  uint32_t bucket_count;
-  uint32_t range_checksum;
-
-  if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
-                              &declared_max, &bucket_count, &range_checksum)) {
-    return nullptr;
-  }
-
-  HistogramBase* histogram = LinearHistogram::FactoryGet(
-      histogram_name, declared_min, declared_max, bucket_count, flags);
-  if (!histogram)
-    return nullptr;
-
-  if (!ValidateRangeChecksum(*histogram, range_checksum)) {
-    // The serialized histogram might be corrupted.
-    return nullptr;
-  }
-  return histogram;
-}
-
-//------------------------------------------------------------------------------
-// This section provides implementation for BooleanHistogram.
-//------------------------------------------------------------------------------
-
-class BooleanHistogram::Factory : public Histogram::Factory {
- public:
-  Factory(const std::string& name, int32_t flags)
-    : Histogram::Factory(name, BOOLEAN_HISTOGRAM, 1, 2, 3, flags) {}
-
- protected:
-  BucketRanges* CreateRanges() override {
-    BucketRanges* ranges = new BucketRanges(3 + 1);
-    LinearHistogram::InitializeBucketRanges(1, 2, ranges);
-    return ranges;
-  }
-
-  std::unique_ptr<HistogramBase> HeapAlloc(
-      const BucketRanges* ranges) override {
-    return WrapUnique(new BooleanHistogram(GetPermanentName(name_), ranges));
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(Factory);
-};
-
-HistogramBase* BooleanHistogram::FactoryGet(const std::string& name,
-                                            int32_t flags) {
-  return Factory(name, flags).Build();
-}
-
-HistogramBase* BooleanHistogram::FactoryGet(const char* name, int32_t flags) {
-  return FactoryGet(std::string(name), flags);
-}
-
-std::unique_ptr<HistogramBase> BooleanHistogram::PersistentCreate(
-    const char* name,
-    const BucketRanges* ranges,
-    const DelayedPersistentAllocation& counts,
-    const DelayedPersistentAllocation& logged_counts,
-    HistogramSamples::Metadata* meta,
-    HistogramSamples::Metadata* logged_meta) {
-  return WrapUnique(new BooleanHistogram(name, ranges, counts, logged_counts,
-                                         meta, logged_meta));
-}
-
-HistogramType BooleanHistogram::GetHistogramType() const {
-  return BOOLEAN_HISTOGRAM;
-}
-
-BooleanHistogram::BooleanHistogram(const char* name, const BucketRanges* ranges)
-    : LinearHistogram(name, 1, 2, ranges) {}
-
-BooleanHistogram::BooleanHistogram(
-    const char* name,
-    const BucketRanges* ranges,
-    const DelayedPersistentAllocation& counts,
-    const DelayedPersistentAllocation& logged_counts,
-    HistogramSamples::Metadata* meta,
-    HistogramSamples::Metadata* logged_meta)
-    : LinearHistogram(name,
-                      1,
-                      2,
-                      ranges,
-                      counts,
-                      logged_counts,
-                      meta,
-                      logged_meta) {}
-
-HistogramBase* BooleanHistogram::DeserializeInfoImpl(PickleIterator* iter) {
-  std::string histogram_name;
-  int flags;
-  int declared_min;
-  int declared_max;
-  uint32_t bucket_count;
-  uint32_t range_checksum;
-
-  if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
-                              &declared_max, &bucket_count, &range_checksum)) {
-    return nullptr;
-  }
-
-  HistogramBase* histogram = BooleanHistogram::FactoryGet(
-      histogram_name, flags);
-  if (!histogram)
-    return nullptr;
-
-  if (!ValidateRangeChecksum(*histogram, range_checksum)) {
-    // The serialized histogram might be corrupted.
-    return nullptr;
-  }
-  return histogram;
-}
-
-//------------------------------------------------------------------------------
-// CustomHistogram:
-//------------------------------------------------------------------------------
-
-class CustomHistogram::Factory : public Histogram::Factory {
- public:
-  Factory(const std::string& name,
-          const std::vector<Sample>* custom_ranges,
-          int32_t flags)
-    : Histogram::Factory(name, CUSTOM_HISTOGRAM, 0, 0, 0, flags) {
-    custom_ranges_ = custom_ranges;
-  }
-
- protected:
-  BucketRanges* CreateRanges() override {
-    // Remove the duplicates in the custom ranges array.
-    std::vector<Sample> ranges = *custom_ranges_;
-    ranges.push_back(0);  // Ensure we have a zero value.
-    ranges.push_back(HistogramBase::kSampleType_MAX);
-    std::sort(ranges.begin(), ranges.end());
-    ranges.erase(std::unique(ranges.begin(), ranges.end()), ranges.end());
-
-    BucketRanges* bucket_ranges = new BucketRanges(ranges.size());
-    for (uint32_t i = 0; i < ranges.size(); i++) {
-      bucket_ranges->set_range(i, ranges[i]);
-    }
-    bucket_ranges->ResetChecksum();
-    return bucket_ranges;
-  }
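-  // Worked example (illustrative): custom_ranges_ = {10, 5, 10} becomes
-  // {0, 5, 10, INT_MAX} after adding the sentinels, sorting, and removing
-  // duplicates, yielding the three buckets [0, 5), [5, 10) and [10, INT_MAX).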
-
-  std::unique_ptr<HistogramBase> HeapAlloc(
-      const BucketRanges* ranges) override {
-    return WrapUnique(new CustomHistogram(GetPermanentName(name_), ranges));
-  }
-
- private:
-  const std::vector<Sample>* custom_ranges_;
-
-  DISALLOW_COPY_AND_ASSIGN(Factory);
-};
-
-HistogramBase* CustomHistogram::FactoryGet(
-    const std::string& name,
-    const std::vector<Sample>& custom_ranges,
-    int32_t flags) {
-  CHECK(ValidateCustomRanges(custom_ranges));
-
-  return Factory(name, &custom_ranges, flags).Build();
-}
-
-HistogramBase* CustomHistogram::FactoryGet(
-    const char* name,
-    const std::vector<Sample>& custom_ranges,
-    int32_t flags) {
-  return FactoryGet(std::string(name), custom_ranges, flags);
-}
-
-std::unique_ptr<HistogramBase> CustomHistogram::PersistentCreate(
-    const char* name,
-    const BucketRanges* ranges,
-    const DelayedPersistentAllocation& counts,
-    const DelayedPersistentAllocation& logged_counts,
-    HistogramSamples::Metadata* meta,
-    HistogramSamples::Metadata* logged_meta) {
-  return WrapUnique(new CustomHistogram(name, ranges, counts, logged_counts,
-                                        meta, logged_meta));
-}
-
-HistogramType CustomHistogram::GetHistogramType() const {
-  return CUSTOM_HISTOGRAM;
-}
-
-// static
-std::vector<Sample> CustomHistogram::ArrayToCustomEnumRanges(
-    base::span<const Sample> values) {
-  std::vector<Sample> all_values;
-  for (Sample value : values) {
-    all_values.push_back(value);
-
-    // Ensure that a guard bucket is added. If we end up with duplicate
-    // values, FactoryGet will take care of removing them.
-    all_values.push_back(value + 1);
-  }
-  return all_values;
-}
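-// Worked example (illustrative): for values {1, 5} this returns {1, 2, 5, 6};
-// the guard values 2 and 6 become bucket boundaries, so an invalid sample
-// such as 3 cannot land in the same bucket as the valid values 1 or 5.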
-
-CustomHistogram::CustomHistogram(const char* name, const BucketRanges* ranges)
-    : Histogram(name,
-                ranges->range(1),
-                ranges->range(ranges->bucket_count() - 1),
-                ranges) {}
-
-CustomHistogram::CustomHistogram(
-    const char* name,
-    const BucketRanges* ranges,
-    const DelayedPersistentAllocation& counts,
-    const DelayedPersistentAllocation& logged_counts,
-    HistogramSamples::Metadata* meta,
-    HistogramSamples::Metadata* logged_meta)
-    : Histogram(name,
-                ranges->range(1),
-                ranges->range(ranges->bucket_count() - 1),
-                ranges,
-                counts,
-                logged_counts,
-                meta,
-                logged_meta) {}
-
-void CustomHistogram::SerializeInfoImpl(Pickle* pickle) const {
-  Histogram::SerializeInfoImpl(pickle);
-
-  // Serialize ranges. The first and last ranges are always 0 and INT_MAX, so
-  // don't write them.
-  for (uint32_t i = 1; i < bucket_ranges()->bucket_count(); ++i)
-    pickle->WriteInt(bucket_ranges()->range(i));
-}
-
-double CustomHistogram::GetBucketSize(Count current, uint32_t i) const {
-  // If this is a histogram of enum values, normalizing the bucket count
-  // by the bucket range is not helpful, so just return the bucket count.
-  return current;
-}
-
-// static
-HistogramBase* CustomHistogram::DeserializeInfoImpl(PickleIterator* iter) {
-  std::string histogram_name;
-  int flags;
-  int declared_min;
-  int declared_max;
-  uint32_t bucket_count;
-  uint32_t range_checksum;
-
-  if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
-                              &declared_max, &bucket_count, &range_checksum)) {
-    return nullptr;
-  }
-
-  // First and last ranges are not serialized.
-  std::vector<Sample> sample_ranges(bucket_count - 1);
-
-  for (uint32_t i = 0; i < sample_ranges.size(); ++i) {
-    if (!iter->ReadInt(&sample_ranges[i]))
-      return nullptr;
-  }
-
-  HistogramBase* histogram = CustomHistogram::FactoryGet(
-      histogram_name, sample_ranges, flags);
-  if (!histogram)
-    return nullptr;
-
-  if (!ValidateRangeChecksum(*histogram, range_checksum)) {
-    // The serialized histogram might be corrupted.
-    return nullptr;
-  }
-  return histogram;
-}
-
-// static
-bool CustomHistogram::ValidateCustomRanges(
-    const std::vector<Sample>& custom_ranges) {
-  bool has_valid_range = false;
-  for (uint32_t i = 0; i < custom_ranges.size(); i++) {
-    Sample sample = custom_ranges[i];
-    if (sample < 0 || sample > HistogramBase::kSampleType_MAX - 1)
-      return false;
-    if (sample != 0)
-      has_valid_range = true;
-  }
-  return has_valid_range;
-}
-
-}  // namespace base
diff --git a/base/metrics/histogram.h b/base/metrics/histogram.h
deleted file mode 100644
index 35d8370..0000000
--- a/base/metrics/histogram.h
+++ /dev/null
@@ -1,559 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Histogram is an object that aggregates statistics, and can summarize them
-// in various forms, including ASCII graphs, HTML, and numeric vectors (one
-// count per aggregating bucket).
-
-// It supports calls to accumulate either time intervals (which are processed
-// as an integral number of milliseconds) or arbitrary integral units.
-
-// For Histogram (exponential histogram), LinearHistogram and CustomHistogram,
-// the minimum for a declared range is 1 (instead of 0), while the maximum is
-// (HistogramBase::kSampleType_MAX - 1). However, there will always be underflow
-// and overflow buckets added automatically, so a 0 bucket will always exist
-// even when a minimum value of 1 is specified.
-
-// Each use of a histogram with the same name will reference the same underlying
-// data, so it is safe to record to the same histogram from multiple locations
-// in the code. It is a runtime error if all uses of the same histogram do not
-// agree exactly in type, bucket size and range.
-
-// For Histogram and LinearHistogram, the maximum of a declared range must be
-// strictly larger than (not equal to) the minimum. Zero and
-// HistogramBase::kSampleType_MAX are implicitly added as the first and last
-// ranges, so the smallest legal bucket_count is 3. However, CustomHistogram
-// can have a bucket count of 2 (when you give a custom ranges vector
-// containing only 1 range).
-// For these 3 kinds of histograms, the max bucket count is always
-// (Histogram::kBucketCount_MAX - 1).
-
-// The bucket layout of class Histogram is exponential. For example, buckets
-// might contain (sequentially) the count of values in the following intervals:
-// [0,1), [1,2), [2,4), [4,8), [8,16), [16,32), [32,64), [64,infinity)
-// That bucket allocation would actually result from construction of a histogram
-// for values between 1 and 64, with 8 buckets, such as:
-// Histogram count("some name", 1, 64, 8);
-// Note that the underflow bucket [0,1) and the overflow bucket [64,infinity)
-// are also counted by the constructor in the user-supplied "bucket_count"
-// argument.
-// The above example has an exponential ratio of 2 (doubling the bucket width
-// in each consecutive bucket).  The Histogram class automatically calculates
-// the smallest ratio that it can use to construct the number of buckets
-// selected in the constructor.  As another example, if you had 50 buckets,
-// and millisecond time values from 1 to 10000, then the ratio between
-// consecutive bucket widths would be approximately the 50th root of 10000.
-// This approach provides very fine-grained (narrow) buckets at the low end of
-// the histogram scale, but allows the histogram to cover a gigantic range
-// with the addition of very few buckets.
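-//
-// A minimal usage sketch of the example above (illustrative; real code
-// normally goes through the macros in base/metrics/histogram_macros.h):
-//
-//   HistogramBase* counts = Histogram::FactoryGet(
-//       "some name", 1, 64, 8, HistogramBase::kNoFlags);
-//   counts->Add(48);  // Lands in the [32, 64) bucket.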
-
-// Usually we use macros to define and use a histogram, which are defined in
-// base/metrics/histogram_macros.h. Note: Callers should include that header
-// directly if they only access the histogram APIs through macros.
-//
-// Macros use a pattern involving a function-static variable that is a pointer
-// to a histogram.  This static is explicitly initialized on any thread
-// that detects an uninitialized (NULL) pointer.  The potentially racy
-// initialization is not a problem as it is always set to point to the same
-// value (i.e., FactoryGet always returns the same value).  FactoryGet
-// is also completely thread-safe, which results in a completely thread-safe,
-// and relatively fast, set of counters.  To avoid races at shutdown, the static
-// pointer is NOT deleted, and we leak the histograms at process termination.
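-//
-// Simplified sketch of what such a macro expands to (illustrative only; the
-// real macros also handle atomics and flag checks, and the name "My.Metric"
-// is a placeholder):
-//
-//   static HistogramBase* histogram_pointer = nullptr;
-//   if (!histogram_pointer) {
-//     histogram_pointer = Histogram::FactoryGet(
-//         "My.Metric", 1, 100, 50, HistogramBase::kUmaTargetedHistogramFlag);
-//   }
-//   histogram_pointer->Add(sample);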
-
-#ifndef BASE_METRICS_HISTOGRAM_H_
-#define BASE_METRICS_HISTOGRAM_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/containers/span.h"
-#include "base/gtest_prod_util.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/metrics/bucket_ranges.h"
-#include "base/metrics/histogram_base.h"
-#include "base/metrics/histogram_samples.h"
-#include "base/strings/string_piece.h"
-#include "base/time/time.h"
-
-namespace base {
-
-class BooleanHistogram;
-class CustomHistogram;
-class DelayedPersistentAllocation;
-class Histogram;
-class LinearHistogram;
-class Pickle;
-class PickleIterator;
-class SampleVector;
-class SampleVectorBase;
-
-class BASE_EXPORT Histogram : public HistogramBase {
- public:
-  // Initialize maximum number of buckets in histograms as 16,384.
-  static const uint32_t kBucketCount_MAX;
-
-  typedef std::vector<Count> Counts;
-
-  ~Histogram() override;
-
-  //----------------------------------------------------------------------------
-  // For a valid histogram, input should follow these restrictions:
-  // minimum > 0 (if a minimum below 1 is specified, it will implicitly be
-  //              normalized up to 1)
-  // maximum > minimum
-  // buckets > 2 [minimum buckets needed: underflow, overflow and the range]
-  // Additionally,
-  // buckets <= (maximum - minimum + 2) - this is to ensure that we don't have
-  // more buckets than the range of numbers; having more buckets than 1 per
-  // value in the range would be nonsensical.
-  static HistogramBase* FactoryGet(const std::string& name,
-                                   Sample minimum,
-                                   Sample maximum,
-                                   uint32_t bucket_count,
-                                   int32_t flags);
-  static HistogramBase* FactoryTimeGet(const std::string& name,
-                                       base::TimeDelta minimum,
-                                       base::TimeDelta maximum,
-                                       uint32_t bucket_count,
-                                       int32_t flags);
-  static HistogramBase* FactoryMicrosecondsTimeGet(const std::string& name,
-                                                   base::TimeDelta minimum,
-                                                   base::TimeDelta maximum,
-                                                   uint32_t bucket_count,
-                                                   int32_t flags);
-
-  // Overloads of the above functions that take a const char* |name| param, to
-  // avoid code bloat from the std::string constructor being inlined into call
-  // sites.
-  static HistogramBase* FactoryGet(const char* name,
-                                   Sample minimum,
-                                   Sample maximum,
-                                   uint32_t bucket_count,
-                                   int32_t flags);
-  static HistogramBase* FactoryTimeGet(const char* name,
-                                       base::TimeDelta minimum,
-                                       base::TimeDelta maximum,
-                                       uint32_t bucket_count,
-                                       int32_t flags);
-  static HistogramBase* FactoryMicrosecondsTimeGet(const char* name,
-                                                   base::TimeDelta minimum,
-                                                   base::TimeDelta maximum,
-                                                   uint32_t bucket_count,
-                                                   int32_t flags);
-
-  // Create a histogram using data in persistent storage.
-  static std::unique_ptr<HistogramBase> PersistentCreate(
-      const char* name,
-      Sample minimum,
-      Sample maximum,
-      const BucketRanges* ranges,
-      const DelayedPersistentAllocation& counts,
-      const DelayedPersistentAllocation& logged_counts,
-      HistogramSamples::Metadata* meta,
-      HistogramSamples::Metadata* logged_meta);
-
-  static void InitializeBucketRanges(Sample minimum,
-                                     Sample maximum,
-                                     BucketRanges* ranges);
-
-  // This constant is for FindCorruption. Since snapshots of histograms are
-  // taken asynchronously relative to sampling, and our counting code currently
-  // does not prevent race conditions, it is pretty likely that we'll catch a
-  // redundant count that doesn't match the sample count.  We allow for a
-  // certain amount of slop before flagging this as an inconsistency. Even with
-  // an inconsistency, we'll snapshot it again (for UMA in about a half hour),
-  // so we'll eventually get the data, if it was not the result of a corruption.
-  static const int kCommonRaceBasedCountMismatch;
-
-  // Check to see if bucket ranges, counts and tallies in the snapshot are
-  // consistent with the bucket ranges and checksums in our histogram.  This can
-  // produce a false alarm if a race occurred in the reading of the data during
-  // a snapshot, but should otherwise be false at all times (unless we
-  // have memory over-writes, or DRAM failures). Flag definitions are located
-  // under "enum Inconsistency" in base/metrics/histogram_base.h.
-  uint32_t FindCorruption(const HistogramSamples& samples) const override;
-
-  //----------------------------------------------------------------------------
-  // Accessors for factory construction, serialization and testing.
-  //----------------------------------------------------------------------------
-  const BucketRanges* bucket_ranges() const;
-  Sample declared_min() const;
-  Sample declared_max() const;
-  virtual Sample ranges(uint32_t i) const;
-  virtual uint32_t bucket_count() const;
-
-  // This function validates histogram construction arguments. It returns false
-  // if some of the arguments are bad but also corrects them so they should
-  // function on non-DCHECK builds without crashing.
-  // Note: currently it allows some bad input, e.g. 0 as a minimum, but silently
-  // converts it to good input: 1.
-  // TODO(bcwhite): Use false returns to create "sink" histograms so that bad
-  // data doesn't create confusion on the servers.
-  static bool InspectConstructionArguments(StringPiece name,
-                                           Sample* minimum,
-                                           Sample* maximum,
-                                           uint32_t* bucket_count);
-
-  // HistogramBase implementation:
-  uint64_t name_hash() const override;
-  HistogramType GetHistogramType() const override;
-  bool HasConstructionArguments(Sample expected_minimum,
-                                Sample expected_maximum,
-                                uint32_t expected_bucket_count) const override;
-  void Add(Sample value) override;
-  void AddCount(Sample value, int count) override;
-  std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
-  std::unique_ptr<HistogramSamples> SnapshotDelta() override;
-  std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
-  void AddSamples(const HistogramSamples& samples) override;
-  bool AddSamplesFromPickle(base::PickleIterator* iter) override;
-  void WriteHTMLGraph(std::string* output) const override;
-  void WriteAscii(std::string* output) const override;
-
-  // Validates the histogram contents and CHECKs on errors.
-  // TODO(bcwhite): Remove this after https://crbug/836875.
-  void ValidateHistogramContents() const override;
-
- protected:
-  // This class, defined entirely within the .cc file, contains all the
-  // common logic for building a Histogram and can be overridden by more
-  // specific types to alter details of how the creation is done. It is
-  // defined as an embedded class (rather than an anonymous one) so it
-  // can access the protected constructors.
-  class Factory;
-
-  // |ranges| should contain the underflow and overflow buckets. See top
-  // comments for example.
-  Histogram(const char* name,
-            Sample minimum,
-            Sample maximum,
-            const BucketRanges* ranges);
-
-  // Traditionally, histograms allocate their own memory for the bucket
-  // vector but "shared" histograms use memory regions allocated from a
-  // special memory segment that is passed in here.  It is assumed that
-  // the life of this memory is managed externally and exceeds the lifetime
-  // of this object. Practically, this memory is never released until the
-  // process exits and the OS cleans it up.
-  Histogram(const char* name,
-            Sample minimum,
-            Sample maximum,
-            const BucketRanges* ranges,
-            const DelayedPersistentAllocation& counts,
-            const DelayedPersistentAllocation& logged_counts,
-            HistogramSamples::Metadata* meta,
-            HistogramSamples::Metadata* logged_meta);
-
-  // HistogramBase implementation:
-  void SerializeInfoImpl(base::Pickle* pickle) const override;
-
-  // Method to override to skip the display of the i'th bucket if it's empty.
-  virtual bool PrintEmptyBucket(uint32_t index) const;
-
-  // Get normalized size, relative to the ranges(i).
-  virtual double GetBucketSize(Count current, uint32_t i) const;
-
-  // Return a string description of what goes in a given bucket.
-  // Most commonly this is the numeric value, but in derived classes it may
-  // be a name (or string description) given to the bucket.
-  virtual const std::string GetAsciiBucketRange(uint32_t i) const;
-
- private:
-  // Allow tests to corrupt our innards for testing purposes.
-  FRIEND_TEST_ALL_PREFIXES(HistogramTest, BoundsTest);
-  FRIEND_TEST_ALL_PREFIXES(HistogramTest, BucketPlacementTest);
-  FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
-
-  friend class StatisticsRecorder;  // To allow it to delete duplicates.
-  friend class StatisticsRecorderTest;
-
-  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
-      base::PickleIterator* iter);
-  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
-
-  // Create a snapshot containing all samples (both logged and unlogged).
-  // Implementation of SnapshotSamples method with a more specific type for
-  // internal use.
-  std::unique_ptr<SampleVector> SnapshotAllSamples() const;
-
-  // Create a copy of unlogged samples.
-  std::unique_ptr<SampleVector> SnapshotUnloggedSamples() const;
-
-  //----------------------------------------------------------------------------
-  // Helpers for emitting Ascii graphic.  Each method appends data to output.
-
-  void WriteAsciiImpl(bool graph_it,
-                      const std::string& newline,
-                      std::string* output) const;
-
-  // Find out how large (graphically) the largest bucket will appear to be.
-  double GetPeakBucketSize(const SampleVectorBase& samples) const;
-
-  // Write a common header message describing this histogram.
-  void WriteAsciiHeader(const SampleVectorBase& samples,
-                        Count sample_count,
-                        std::string* output) const;
-
-  // Write information about previous, current, and next buckets.
-  // Information such as cumulative percentage, etc.
-  void WriteAsciiBucketContext(const int64_t past,
-                               const Count current,
-                               const int64_t remaining,
-                               const uint32_t i,
-                               std::string* output) const;
-
-  // WriteJSON calls these.
-  void GetParameters(DictionaryValue* params) const override;
-
-  void GetCountAndBucketData(Count* count,
-                             int64_t* sum,
-                             ListValue* buckets) const override;
-
-  // Samples that have not yet been logged with SnapshotDelta().
-  std::unique_ptr<SampleVectorBase> unlogged_samples_;
-
-  // Accumulation of all samples that have been logged with SnapshotDelta().
-  std::unique_ptr<SampleVectorBase> logged_samples_;
-
-#if DCHECK_IS_ON()  // Don't waste memory if it won't be used.
-  // Flag to indicate if PrepareFinalDelta has been previously called. It is
-  // used to DCHECK that a final delta is not created multiple times.
-  mutable bool final_delta_created_ = false;
-#endif
-
-  DISALLOW_COPY_AND_ASSIGN(Histogram);
-};
-
-//------------------------------------------------------------------------------
-
-// LinearHistogram is a more traditional histogram, with evenly spaced
-// buckets.
-class BASE_EXPORT LinearHistogram : public Histogram {
- public:
-  ~LinearHistogram() override;
-
-  // The minimum should start from 1; 0 as a minimum is invalid because 0 is
-  // the implicit default underflow bucket.
-  static HistogramBase* FactoryGet(const std::string& name,
-                                   Sample minimum,
-                                   Sample maximum,
-                                   uint32_t bucket_count,
-                                   int32_t flags);
-  static HistogramBase* FactoryTimeGet(const std::string& name,
-                                       TimeDelta minimum,
-                                       TimeDelta maximum,
-                                       uint32_t bucket_count,
-                                       int32_t flags);
-
-  // Overloads of the above two functions that take a const char* |name| param,
-  // to avoid code bloat from the std::string constructor being inlined into
-  // call sites.
-  static HistogramBase* FactoryGet(const char* name,
-                                   Sample minimum,
-                                   Sample maximum,
-                                   uint32_t bucket_count,
-                                   int32_t flags);
-  static HistogramBase* FactoryTimeGet(const char* name,
-                                       TimeDelta minimum,
-                                       TimeDelta maximum,
-                                       uint32_t bucket_count,
-                                       int32_t flags);
-
-  // Create a histogram using data in persistent storage.
-  static std::unique_ptr<HistogramBase> PersistentCreate(
-      const char* name,
-      Sample minimum,
-      Sample maximum,
-      const BucketRanges* ranges,
-      const DelayedPersistentAllocation& counts,
-      const DelayedPersistentAllocation& logged_counts,
-      HistogramSamples::Metadata* meta,
-      HistogramSamples::Metadata* logged_meta);
-
-  struct DescriptionPair {
-    Sample sample;
-    const char* description;  // Null means end of a list of pairs.
-  };
-
-  // Create a LinearHistogram and store a list of number/text values for use in
-  // writing the histogram graph.
-  // |descriptions| can be NULL, which means there are no special descriptions
-  // to set. If it's not NULL, the last element in the array must have a NULL
-  // in its "description" field.
-  static HistogramBase* FactoryGetWithRangeDescription(
-      const std::string& name,
-      Sample minimum,
-      Sample maximum,
-      uint32_t bucket_count,
-      int32_t flags,
-      const DescriptionPair descriptions[]);
-
-  static void InitializeBucketRanges(Sample minimum,
-                                     Sample maximum,
-                                     BucketRanges* ranges);
-
-  // Overridden from Histogram:
-  HistogramType GetHistogramType() const override;
-
- protected:
-  class Factory;
-
-  LinearHistogram(const char* name,
-                  Sample minimum,
-                  Sample maximum,
-                  const BucketRanges* ranges);
-
-  LinearHistogram(const char* name,
-                  Sample minimum,
-                  Sample maximum,
-                  const BucketRanges* ranges,
-                  const DelayedPersistentAllocation& counts,
-                  const DelayedPersistentAllocation& logged_counts,
-                  HistogramSamples::Metadata* meta,
-                  HistogramSamples::Metadata* logged_meta);
-
-  double GetBucketSize(Count current, uint32_t i) const override;
-
-  // If we have a description for a bucket, then return that.  Otherwise
-  // let the parent class provide a (numeric) description.
-  const std::string GetAsciiBucketRange(uint32_t i) const override;
-
-  // Skip printing an empty bucket if it has a named description; unnamed
-  // empty buckets fall back to the parent behavior.
-  bool PrintEmptyBucket(uint32_t index) const override;
-
- private:
-  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
-      base::PickleIterator* iter);
-  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
-
-  // For some ranges, we store a printable description of a bucket range.
-  // If there is no description, then GetAsciiBucketRange() uses parent class
-  // to provide a description.
-  typedef std::map<Sample, std::string> BucketDescriptionMap;
-  BucketDescriptionMap bucket_description_;
-
-  DISALLOW_COPY_AND_ASSIGN(LinearHistogram);
-};
-
-//------------------------------------------------------------------------------
-
-// BooleanHistogram is a histogram for booleans.
-class BASE_EXPORT BooleanHistogram : public LinearHistogram {
- public:
-  static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
-
-  // Overload of the above function that takes a const char* |name| param,
-  // to avoid code bloat from the std::string constructor being inlined into
-  // call sites.
-  static HistogramBase* FactoryGet(const char* name, int32_t flags);
-
-  // Create a histogram using data in persistent storage.
-  static std::unique_ptr<HistogramBase> PersistentCreate(
-      const char* name,
-      const BucketRanges* ranges,
-      const DelayedPersistentAllocation& counts,
-      const DelayedPersistentAllocation& logged_counts,
-      HistogramSamples::Metadata* meta,
-      HistogramSamples::Metadata* logged_meta);
-
-  HistogramType GetHistogramType() const override;
-
- protected:
-  class Factory;
-
- private:
-  BooleanHistogram(const char* name, const BucketRanges* ranges);
-  BooleanHistogram(const char* name,
-                   const BucketRanges* ranges,
-                   const DelayedPersistentAllocation& counts,
-                   const DelayedPersistentAllocation& logged_counts,
-                   HistogramSamples::Metadata* meta,
-                   HistogramSamples::Metadata* logged_meta);
-
-  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
-      base::PickleIterator* iter);
-  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
-
-  DISALLOW_COPY_AND_ASSIGN(BooleanHistogram);
-};
-
-//------------------------------------------------------------------------------
-
-// CustomHistogram is a histogram for a set of custom integers.
-class BASE_EXPORT CustomHistogram : public Histogram {
- public:
-  // |custom_ranges| contains a vector of limits on ranges. Each limit should be
-  // > 0 and < kSampleType_MAX. (Currently 0 is still accepted for backward
-  // compatibility). The limits can be unordered or contain duplicates, but
-  // clients should not depend on this.
-  static HistogramBase* FactoryGet(const std::string& name,
-                                   const std::vector<Sample>& custom_ranges,
-                                   int32_t flags);
-
-  // Overload of the above function that takes a const char* |name| param,
-  // to avoid code bloat from the std::string constructor being inlined into
-  // call sites.
-  static HistogramBase* FactoryGet(const char* name,
-                                   const std::vector<Sample>& custom_ranges,
-                                   int32_t flags);
-
-  // Create a histogram using data in persistent storage.
-  static std::unique_ptr<HistogramBase> PersistentCreate(
-      const char* name,
-      const BucketRanges* ranges,
-      const DelayedPersistentAllocation& counts,
-      const DelayedPersistentAllocation& logged_counts,
-      HistogramSamples::Metadata* meta,
-      HistogramSamples::Metadata* logged_meta);
-
-  // Overridden from Histogram:
-  HistogramType GetHistogramType() const override;
-
-  // Helper method for transforming an array of valid enumeration values
-  // to the std::vector<int> expected by UMA_HISTOGRAM_CUSTOM_ENUMERATION.
-  // This function ensures that a guard bucket exists right after any
-  // valid sample value (unless the next higher sample is also a valid value),
-  // so that invalid samples never fall into the same bucket as valid samples.
-  static std::vector<Sample> ArrayToCustomEnumRanges(
-      base::span<const Sample> values);
-
- protected:
-  class Factory;
-
-  CustomHistogram(const char* name, const BucketRanges* ranges);
-
-  CustomHistogram(const char* name,
-                  const BucketRanges* ranges,
-                  const DelayedPersistentAllocation& counts,
-                  const DelayedPersistentAllocation& logged_counts,
-                  HistogramSamples::Metadata* meta,
-                  HistogramSamples::Metadata* logged_meta);
-
-  // HistogramBase implementation:
-  void SerializeInfoImpl(base::Pickle* pickle) const override;
-
-  double GetBucketSize(Count current, uint32_t i) const override;
-
- private:
-  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
-      base::PickleIterator* iter);
-  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
-
-  static bool ValidateCustomRanges(const std::vector<Sample>& custom_ranges);
-
-  DISALLOW_COPY_AND_ASSIGN(CustomHistogram);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_HISTOGRAM_H_
diff --git a/base/metrics/histogram_base.cc b/base/metrics/histogram_base.cc
deleted file mode 100644
index da3ae93..0000000
--- a/base/metrics/histogram_base.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/histogram_base.h"
-
-#include <limits.h>
-
-#include <memory>
-#include <set>
-#include <utility>
-
-#include "base/json/json_string_value_serializer.h"
-#include "base/lazy_instance.h"
-#include "base/logging.h"
-#include "base/metrics/histogram.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/metrics/histogram_samples.h"
-#include "base/metrics/sparse_histogram.h"
-#include "base/metrics/statistics_recorder.h"
-#include "base/pickle.h"
-#include "base/process/process_handle.h"
-#include "base/rand_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/synchronization/lock.h"
-#include "base/values.h"
-
-namespace base {
-
-std::string HistogramTypeToString(HistogramType type) {
-  switch (type) {
-    case HISTOGRAM:
-      return "HISTOGRAM";
-    case LINEAR_HISTOGRAM:
-      return "LINEAR_HISTOGRAM";
-    case BOOLEAN_HISTOGRAM:
-      return "BOOLEAN_HISTOGRAM";
-    case CUSTOM_HISTOGRAM:
-      return "CUSTOM_HISTOGRAM";
-    case SPARSE_HISTOGRAM:
-      return "SPARSE_HISTOGRAM";
-    case DUMMY_HISTOGRAM:
-      return "DUMMY_HISTOGRAM";
-  }
-  NOTREACHED();
-  return "UNKNOWN";
-}
-
-HistogramBase* DeserializeHistogramInfo(PickleIterator* iter) {
-  int type;
-  if (!iter->ReadInt(&type))
-    return nullptr;
-
-  switch (type) {
-    case HISTOGRAM:
-      return Histogram::DeserializeInfoImpl(iter);
-    case LINEAR_HISTOGRAM:
-      return LinearHistogram::DeserializeInfoImpl(iter);
-    case BOOLEAN_HISTOGRAM:
-      return BooleanHistogram::DeserializeInfoImpl(iter);
-    case CUSTOM_HISTOGRAM:
-      return CustomHistogram::DeserializeInfoImpl(iter);
-    case SPARSE_HISTOGRAM:
-      return SparseHistogram::DeserializeInfoImpl(iter);
-    default:
-      return nullptr;
-  }
-}
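-// Round-trip sketch (illustrative): SerializeInfo() writes the type tag
-// first, so deserialization reads the int type and then dispatches to the
-// matching type-specific reader above:
-//
-//   Pickle pickle;
-//   histogram->SerializeInfo(&pickle);
-//   PickleIterator iter(pickle);
-//   HistogramBase* same_or_new = DeserializeHistogramInfo(&iter);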
-
-const HistogramBase::Sample HistogramBase::kSampleType_MAX = INT_MAX;
-
-HistogramBase::HistogramBase(const char* name)
-    : histogram_name_(name), flags_(kNoFlags) {}
-
-HistogramBase::~HistogramBase() = default;
-
-void HistogramBase::CheckName(const StringPiece& name) const {
-  DCHECK_EQ(StringPiece(histogram_name()), name);
-}
-
-void HistogramBase::SetFlags(int32_t flags) {
-  HistogramBase::Count old_flags = subtle::NoBarrier_Load(&flags_);
-  subtle::NoBarrier_Store(&flags_, old_flags | flags);
-}
-
-void HistogramBase::ClearFlags(int32_t flags) {
-  HistogramBase::Count old_flags = subtle::NoBarrier_Load(&flags_);
-  subtle::NoBarrier_Store(&flags_, old_flags & ~flags);
-}
-
-void HistogramBase::AddScaled(Sample value, int count, int scale) {
-  DCHECK_LT(0, scale);
-
-  // Convert raw count and probabilistically round up/down if the remainder
-  // is more than a random number [0, scale). This gives a more accurate
-  // count when there are a large number of records. RandInt is "inclusive",
-  // hence the -1 for the max value.
-  int64_t count_scaled = count / scale;
-  if (count - (count_scaled * scale) > base::RandInt(0, scale - 1))
-    count_scaled += 1;
-  if (count_scaled == 0)
-    return;
-
-  AddCount(value, count_scaled);
-}
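-// Worked example (illustrative): AddScaled(value, 2500, 1000) computes
-// count_scaled = 2 with a remainder of 500, so it records 3 with probability
-// 500/1000 and 2 otherwise; over many records the expected total matches the
-// true count / scale.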
-
-void HistogramBase::AddKilo(Sample value, int count) {
-  AddScaled(value, count, 1000);
-}
-
-void HistogramBase::AddKiB(Sample value, int count) {
-  AddScaled(value, count, 1024);
-}
-
-void HistogramBase::AddTimeMillisecondsGranularity(const TimeDelta& time) {
-  Add(static_cast<Sample>(time.InMilliseconds()));
-}
-
-void HistogramBase::AddTimeMicrosecondsGranularity(const TimeDelta& time) {
-  // Intentionally drop high-resolution reports on clients with low-resolution
-  // clocks. High-resolution metrics cannot make use of low-resolution data and
-  // reporting it merely adds noise to the metric. https://crbug.com/807615#c16
-  if (TimeTicks::IsHighResolution())
-    Add(static_cast<Sample>(time.InMicroseconds()));
-}
-
-void HistogramBase::AddBoolean(bool value) {
-  Add(value ? 1 : 0);
-}
-
-void HistogramBase::SerializeInfo(Pickle* pickle) const {
-  pickle->WriteInt(GetHistogramType());
-  SerializeInfoImpl(pickle);
-}
-
-uint32_t HistogramBase::FindCorruption(const HistogramSamples& samples) const {
-  // Not supported by default.
-  return NO_INCONSISTENCIES;
-}
-
-void HistogramBase::ValidateHistogramContents() const {}
-
-void HistogramBase::WriteJSON(std::string* output,
-                              JSONVerbosityLevel verbosity_level) const {
-  Count count;
-  int64_t sum;
-  std::unique_ptr<ListValue> buckets(new ListValue());
-  GetCountAndBucketData(&count, &sum, buckets.get());
-  std::unique_ptr<DictionaryValue> parameters(new DictionaryValue());
-  GetParameters(parameters.get());
-
-  JSONStringValueSerializer serializer(output);
-  DictionaryValue root;
-  root.SetString("name", histogram_name());
-  root.SetInteger("count", count);
-  root.SetDouble("sum", static_cast<double>(sum));
-  root.SetInteger("flags", flags());
-  root.Set("params", std::move(parameters));
-  if (verbosity_level != JSON_VERBOSITY_LEVEL_OMIT_BUCKETS)
-    root.Set("buckets", std::move(buckets));
-  root.SetInteger("pid", GetUniqueIdForProcess());
-  serializer.Serialize(root);
-}
-
-void HistogramBase::FindAndRunCallback(HistogramBase::Sample sample) const {
-  if ((flags() & kCallbackExists) == 0)
-    return;
-
-  StatisticsRecorder::OnSampleCallback cb =
-      StatisticsRecorder::FindCallback(histogram_name());
-  if (!cb.is_null())
-    cb.Run(sample);
-}
-
-void HistogramBase::WriteAsciiBucketGraph(double current_size,
-                                          double max_size,
-                                          std::string* output) const {
-  const int kLineLength = 72;  // Maximal horizontal width of graph.
-  int x_count = static_cast<int>(kLineLength * (current_size / max_size)
-                                 + 0.5);
-  int x_remainder = kLineLength - x_count;
-
-  while (0 < x_count--)
-    output->append("-");
-  output->append("O");
-  while (0 < x_remainder--)
-    output->append(" ");
-}
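-// Example output (illustrative): with current_size == max_size / 2 this
-// appends 36 '-' characters, a single 'O' marker, then 36 trailing spaces:
-//
-//   ------------------------------------O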
-
-const std::string HistogramBase::GetSimpleAsciiBucketRange(
-    Sample sample) const {
-  return StringPrintf("%d", sample);
-}
-
-void HistogramBase::WriteAsciiBucketValue(Count current,
-                                          double scaled_sum,
-                                          std::string* output) const {
-  StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
-}
-
-// static
-char const* HistogramBase::GetPermanentName(const std::string& name) {
-  // A set of histogram names that provides the "permanent" lifetime required
-  // by histogram objects for those strings that are not already code constants
-  // or held in persistent memory.
-  static LazyInstance<std::set<std::string>>::Leaky permanent_names;
-  static LazyInstance<Lock>::Leaky permanent_names_lock;
-
-  AutoLock lock(permanent_names_lock.Get());
-  auto result = permanent_names.Get().insert(name);
-  return result.first->c_str();
-}
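-// Usage sketch (illustrative, as called from factory code): the returned
-// pointer is never freed, so it can safely back a HistogramBase's unowned
-// name even when the name was built at runtime:
-//
-//   const char* permanent = GetPermanentName(runtime_name);  // hypothetical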
-
-}  // namespace base
diff --git a/base/metrics/histogram_base.h b/base/metrics/histogram_base.h
deleted file mode 100644
index 010dc55..0000000
--- a/base/metrics/histogram_base.h
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_BASE_H_
-#define BASE_METRICS_HISTOGRAM_BASE_H_
-
-#include <limits.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "base/atomicops.h"
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/strings/string_piece.h"
-#include "base/time/time.h"
-
-namespace base {
-
-class DictionaryValue;
-class HistogramBase;
-class HistogramSamples;
-class ListValue;
-class Pickle;
-class PickleIterator;
-
-////////////////////////////////////////////////////////////////////////////////
-// This enum is used to facilitate deserialization of histograms from other
-// processes into the browser. If you create another class that inherits from
-// HistogramBase, add new histogram types and names below.
-
-enum HistogramType {
-  HISTOGRAM,
-  LINEAR_HISTOGRAM,
-  BOOLEAN_HISTOGRAM,
-  CUSTOM_HISTOGRAM,
-  SPARSE_HISTOGRAM,
-  DUMMY_HISTOGRAM,
-};
-
-// Controls the verbosity of the information when the histogram is serialized
-// to JSON.
-// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base.metrics
-enum JSONVerbosityLevel {
-  // The histogram is completely serialized.
-  JSON_VERBOSITY_LEVEL_FULL,
-  // The bucket information is not serialized.
-  JSON_VERBOSITY_LEVEL_OMIT_BUCKETS,
-};
-
-std::string HistogramTypeToString(HistogramType type);
-
-// This enum is used for reporting how many histograms and of what types and
-// variations are being created. It has to be in the main .h file so it is
-// visible to files that define the various histogram types.
-enum HistogramReport {
-  // Count the number of reports created. The other counts divided by this
-  // number will give the average per run of the program.
-  HISTOGRAM_REPORT_CREATED = 0,
-
-  // Count the total number of histograms created. It is the limit against
-  // which all others are compared.
-  HISTOGRAM_REPORT_HISTOGRAM_CREATED = 1,
-
-  // Count the total number of histogram lookups. It's better to cache
-  // the result of a single lookup rather than do it repeatedly.
-  HISTOGRAM_REPORT_HISTOGRAM_LOOKUP = 2,
-
-  // These count the individual histogram types. This must follow the order
-  // of HistogramType above.
-  HISTOGRAM_REPORT_TYPE_LOGARITHMIC = 3,
-  HISTOGRAM_REPORT_TYPE_LINEAR = 4,
-  HISTOGRAM_REPORT_TYPE_BOOLEAN = 5,
-  HISTOGRAM_REPORT_TYPE_CUSTOM = 6,
-  HISTOGRAM_REPORT_TYPE_SPARSE = 7,
-
-  // These indicate the individual flags that were set.
-  HISTOGRAM_REPORT_FLAG_UMA_TARGETED = 8,
-  HISTOGRAM_REPORT_FLAG_UMA_STABILITY = 9,
-  HISTOGRAM_REPORT_FLAG_PERSISTENT = 10,
-
-  // This must be last.
-  HISTOGRAM_REPORT_MAX = 11
-};
-
-// Create or find an existing histogram that matches the pickled info.
-// Returns NULL if the pickled data has problems.
-BASE_EXPORT HistogramBase* DeserializeHistogramInfo(base::PickleIterator* iter);
-
-////////////////////////////////////////////////////////////////////////////////
-
-class BASE_EXPORT HistogramBase {
- public:
-  typedef int32_t Sample;                // Used for samples.
-  typedef subtle::Atomic32 AtomicCount;  // Used to count samples.
-  typedef int32_t Count;  // Used to manipulate counts in temporaries.
-
-  static const Sample kSampleType_MAX;  // INT_MAX
-
-  enum Flags {
-    kNoFlags = 0x0,
-
-    // Histogram should be UMA uploaded.
-    kUmaTargetedHistogramFlag = 0x1,
-
-    // Indicates that this is a stability histogram. This flag exists to specify
-    // which histograms should be included in the initial stability log. Please
-    // refer to |MetricsService::PrepareInitialStabilityLog|.
-    kUmaStabilityHistogramFlag = kUmaTargetedHistogramFlag | 0x2,
-
-    // Indicates that the histogram was pickled to be sent across an IPC
-    // Channel. If we observe this flag on a histogram being aggregated into
-    // after IPC, then we are running in a single process mode, and the
-    // aggregation should not take place (as we would be aggregating back into
-    // the source histogram!).
-    kIPCSerializationSourceFlag = 0x10,
-
-    // Indicates that a callback exists for when a new sample is recorded on
-    // this histogram. We store this as a flag with the histogram since
-    // histograms can be in performance critical code, and this allows us
-    // to shortcut looking up the callback if it doesn't exist.
-    kCallbackExists = 0x20,
-
-    // Indicates that the histogram is held in "persistent" memory and may
-    // be accessible between processes. This is only possible if such a
-    // memory segment has been created/attached, used to create a
-    // PersistentMemoryAllocator, and that allocator loaded into the Histogram
-    // module before this histogram is created.
-    kIsPersistent = 0x40,
-  };
-
-  // Histogram data inconsistency types.
-  enum Inconsistency : uint32_t {
-    NO_INCONSISTENCIES = 0x0,
-    RANGE_CHECKSUM_ERROR = 0x1,
-    BUCKET_ORDER_ERROR = 0x2,
-    COUNT_HIGH_ERROR = 0x4,
-    COUNT_LOW_ERROR = 0x8,
-
-    NEVER_EXCEEDED_VALUE = 0x10,
-  };
-
-  // Construct the base histogram. The name is not copied; it's up to the
-  // caller to ensure that it lives at least as long as this object.
-  explicit HistogramBase(const char* name);
-  virtual ~HistogramBase();
-
-  const char* histogram_name() const { return histogram_name_; }
-
-  // Compares |name| to the histogram name and triggers a DCHECK if they do
-  // not match. This is a helper function used by histogram macros, which
-  // results in more compact machine code being generated by the macros.
-  virtual void CheckName(const StringPiece& name) const;
-
-  // Get a unique ID for this histogram's samples.
-  virtual uint64_t name_hash() const = 0;
-
-  // Operations with Flags enum.
-  int32_t flags() const { return subtle::NoBarrier_Load(&flags_); }
-  void SetFlags(int32_t flags);
-  void ClearFlags(int32_t flags);
-
-  virtual HistogramType GetHistogramType() const = 0;
-
-  // Returns whether the histogram's construction arguments match the
-  // specified parameters. For histograms that have no concept of minimum,
-  // maximum or bucket_count, this function always returns false.
-  virtual bool HasConstructionArguments(
-      Sample expected_minimum,
-      Sample expected_maximum,
-      uint32_t expected_bucket_count) const = 0;
-
-  virtual void Add(Sample value) = 0;
-
-  // Add() increases the |value| bucket by one, but some use cases need to
-  // increase it by an arbitrary integer. AddCount() increases the |value|
-  // bucket by |count|, which must be greater than or equal to 1.
-  virtual void AddCount(Sample value, int count) = 0;
-
-  // Similar to above but divides |count| by the |scale| amount. Probabilistic
-  // rounding is used to yield a reasonably accurate total when many samples
-  // are added. Methods for common cases of scales 1000 and 1024 are included.
-  void AddScaled(Sample value, int count, int scale);
-  void AddKilo(Sample value, int count);  // scale=1000
-  void AddKiB(Sample value, int count);   // scale=1024
-
-  // Convenient functions that call Add(Sample).
-  void AddTime(const TimeDelta& time) { AddTimeMillisecondsGranularity(time); }
-  void AddTimeMillisecondsGranularity(const TimeDelta& time);
-  // Note: AddTimeMicrosecondsGranularity() drops the report if this client
-  // doesn't have a high-resolution clock.
-  void AddTimeMicrosecondsGranularity(const TimeDelta& time);
-  void AddBoolean(bool value);
-
-  virtual void AddSamples(const HistogramSamples& samples) = 0;
-  virtual bool AddSamplesFromPickle(base::PickleIterator* iter) = 0;
-
-  // Serialize the histogram info into |pickle|.
-  // Note: This only serializes the construction arguments of the histogram, but
-  // does not serialize the samples.
-  void SerializeInfo(base::Pickle* pickle) const;
-
-  // Checks the histogram and the given samples for data corruption.
-  // The returned value is a bitwise-OR combination of Inconsistency values.
-  virtual uint32_t FindCorruption(const HistogramSamples& samples) const;
-
-  // Snapshot the current complete set of sample data.
-  // Override with atomic/locked snapshot if needed.
-  // NOTE: this data can overflow for long-running sessions. Handle it with
-  // care; this method is recommended only for about:histograms and test code.
-  virtual std::unique_ptr<HistogramSamples> SnapshotSamples() const = 0;
-
-  // Calculate the change (delta) in histogram counts since the previous call
-  // to this method. Each successive call will return only those counts
-  // changed since the last call.
-  virtual std::unique_ptr<HistogramSamples> SnapshotDelta() = 0;
-
-  // Calculate the change (delta) in histogram counts since the previous call
-  // to SnapshotDelta() but do so without modifying any internal data as to
-  // what was previously logged. After such a call, no further calls to this
-  // method or to SnapshotDelta() should be done as the result would include
-  // data previously returned. Because no internal data is changed, this call
-  // can be made on "const" histograms such as those with data held in
-  // read-only memory.
-  virtual std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const = 0;
-
-  // The following methods provide graphical histogram displays.
-  virtual void WriteHTMLGraph(std::string* output) const = 0;
-  virtual void WriteAscii(std::string* output) const = 0;
-
-  // TODO(bcwhite): Remove this after https://crbug/836875.
-  virtual void ValidateHistogramContents() const;
-
-  // Produce a JSON representation of the histogram with |verbosity_level| as
-  // the serialization verbosity. This is implemented with the help of
-  // GetParameters and GetCountAndBucketData; override them to customize the
-  // output.
-  void WriteJSON(std::string* output, JSONVerbosityLevel verbosity_level) const;
-
- protected:
-  enum ReportActivity { HISTOGRAM_CREATED, HISTOGRAM_LOOKUP };
-
-  // Subclasses should implement this function to make SerializeInfo work.
-  virtual void SerializeInfoImpl(base::Pickle* pickle) const = 0;
-
-  // Writes information about the construction parameters into |params|.
-  virtual void GetParameters(DictionaryValue* params) const = 0;
-
-  // Writes information about the current (non-empty) buckets and their sample
-  // counts to |buckets|, the total sample count to |count| and the total sum
-  // to |sum|.
-  virtual void GetCountAndBucketData(Count* count,
-                                     int64_t* sum,
-                                     ListValue* buckets) const = 0;
-
-  // Produce the actual graph (blank vs. non-blank characters) for a bucket.
-  void WriteAsciiBucketGraph(double current_size,
-                             double max_size,
-                             std::string* output) const;
-
-  // Return a string description of what goes in a given bucket.
-  const std::string GetSimpleAsciiBucketRange(Sample sample) const;
-
-  // Write textual description of the bucket contents (relative to histogram).
-  // Output is the count in the buckets, as well as the percentage.
-  void WriteAsciiBucketValue(Count current,
-                             double scaled_sum,
-                             std::string* output) const;
-
-  // Retrieves the callback for this histogram, if one exists, and runs it
-  // passing |sample| as the parameter.
-  void FindAndRunCallback(Sample sample) const;
-
-  // Gets a permanent string that can be used for histogram objects when the
-  // original is not a code constant or held in persistent memory.
-  static const char* GetPermanentName(const std::string& name);
-
- private:
-  friend class HistogramBaseTest;
-
-  // A pointer to permanent storage where the histogram name is held. This can
-  // be code space or the output of GetPermanentName() or any other storage
-  // that is known to never change. This is not StringPiece because (a) char*
-  // is 1/2 the size and (b) StringPiece transparently casts from std::string
-  // which can easily lead to a pointer to non-permanent space.
-  // For persistent histograms, this will simply point into the persistent
-  // memory segment, thus avoiding duplication. For heap histograms, the
-  // GetPermanentName method will create the necessary copy.
-  const char* const histogram_name_;
-
-  // Additional information about the histogram.
-  AtomicCount flags_;
-
-  DISALLOW_COPY_AND_ASSIGN(HistogramBase);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_HISTOGRAM_BASE_H_
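
The histogram_name_ lifetime requirement above is what GetPermanentName satisfies. A self-contained sketch of that interning pattern, using plain std::mutex in place of base::Lock (the leak is deliberate, as in the deleted implementation; the function name is invented for the example):

#include <mutex>
#include <set>
#include <string>

const char* InternName(const std::string& name) {
  // Leaked on purpose so returned pointers stay valid for the process
  // lifetime; std::set never invalidates element storage on insert.
  static auto* names = new std::set<std::string>();
  static auto* lock = new std::mutex();
  std::lock_guard<std::mutex> guard(*lock);
  return names->insert(name).first->c_str();
}
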
diff --git a/base/metrics/histogram_delta_serialization.cc b/base/metrics/histogram_delta_serialization.cc
deleted file mode 100644
index a74b87f..0000000
--- a/base/metrics/histogram_delta_serialization.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/histogram_delta_serialization.h"
-
-#include "base/logging.h"
-#include "base/metrics/histogram_base.h"
-#include "base/metrics/histogram_snapshot_manager.h"
-#include "base/metrics/statistics_recorder.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/pickle.h"
-#include "base/values.h"
-
-namespace base {
-
-namespace {
-
-// Create or find an existing histogram and add the samples from the pickle.
-// Returns silently on any data problem in the pickle.
-void DeserializeHistogramAndAddSamples(PickleIterator* iter) {
-  HistogramBase* histogram = DeserializeHistogramInfo(iter);
-  if (!histogram)
-    return;
-
-  if (histogram->flags() & HistogramBase::kIPCSerializationSourceFlag) {
-    DVLOG(1) << "Single process mode, histogram observed and not copied: "
-             << histogram->histogram_name();
-    return;
-  }
-  histogram->AddSamplesFromPickle(iter);
-}
-
-}  // namespace
-
-HistogramDeltaSerialization::HistogramDeltaSerialization(
-    const std::string& caller_name)
-    : histogram_snapshot_manager_(this), serialized_deltas_(nullptr) {}
-
-HistogramDeltaSerialization::~HistogramDeltaSerialization() = default;
-
-void HistogramDeltaSerialization::PrepareAndSerializeDeltas(
-    std::vector<std::string>* serialized_deltas,
-    bool include_persistent) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-
-  serialized_deltas_ = serialized_deltas;
-  // Note: Before serializing, we set the kIPCSerializationSourceFlag for all
-  // the histograms, so that the receiving process can distinguish them from the
-  // local histograms.
-  StatisticsRecorder::PrepareDeltas(
-      include_persistent, Histogram::kIPCSerializationSourceFlag,
-      Histogram::kNoFlags, &histogram_snapshot_manager_);
-  serialized_deltas_ = nullptr;
-}
-
-// static
-void HistogramDeltaSerialization::DeserializeAndAddSamples(
-    const std::vector<std::string>& serialized_deltas) {
-  for (std::vector<std::string>::const_iterator it = serialized_deltas.begin();
-       it != serialized_deltas.end(); ++it) {
-    Pickle pickle(it->data(), checked_cast<int>(it->size()));
-    PickleIterator iter(pickle);
-    DeserializeHistogramAndAddSamples(&iter);
-  }
-}
-
-void HistogramDeltaSerialization::RecordDelta(
-    const HistogramBase& histogram,
-    const HistogramSamples& snapshot) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK_NE(0, snapshot.TotalCount());
-
-  Pickle pickle;
-  histogram.SerializeInfo(&pickle);
-  snapshot.Serialize(&pickle);
-  serialized_deltas_->push_back(
-      std::string(static_cast<const char*>(pickle.data()), pickle.size()));
-}
-
-}  // namespace base
diff --git a/base/metrics/histogram_delta_serialization.h b/base/metrics/histogram_delta_serialization.h
deleted file mode 100644
index 57ebd2c..0000000
--- a/base/metrics/histogram_delta_serialization.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
-#define BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/metrics/histogram_flattener.h"
-#include "base/metrics/histogram_snapshot_manager.h"
-#include "base/threading/thread_checker.h"
-
-namespace base {
-
-class HistogramBase;
-
-// Serializes and restores histogram deltas.
-class BASE_EXPORT HistogramDeltaSerialization : public HistogramFlattener {
- public:
-  // |caller_name| is a string used in histograms for counting inconsistencies.
-  explicit HistogramDeltaSerialization(const std::string& caller_name);
-  ~HistogramDeltaSerialization() override;
-
-  // Computes deltas in histogram bucket counts relative to the previous call to
-  // this method. Stores the deltas in serialized form into |serialized_deltas|.
-  // If |serialized_deltas| is null, no data is serialized, though the next call
-  // will compute the deltas relative to this one. Setting |include_persistent|
-  // will include histograms held in persistent memory (and thus may be reported
-  // elsewhere); otherwise only histograms local to this process are serialized.
-  void PrepareAndSerializeDeltas(std::vector<std::string>* serialized_deltas,
-                                 bool include_persistent);
-
-  // Deserialize deltas and add samples to corresponding histograms, creating
-  // them if necessary. Silently ignores errors in |serialized_deltas|.
-  static void DeserializeAndAddSamples(
-      const std::vector<std::string>& serialized_deltas);
-
- private:
-  // HistogramFlattener implementation.
-  void RecordDelta(const HistogramBase& histogram,
-                   const HistogramSamples& snapshot) override;
-
-  ThreadChecker thread_checker_;
-
-  // Calculates deltas in histogram counters.
-  HistogramSnapshotManager histogram_snapshot_manager_;
-
-  // Output buffer for serialized deltas.
-  std::vector<std::string>* serialized_deltas_;
-
-  DISALLOW_COPY_AND_ASSIGN(HistogramDeltaSerialization);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
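
A hypothetical usage sketch of the interface above, assuming a Chromium-style build and an unspecified IPC transport: one process serializes histogram deltas, the receiver replays them into its own histograms.

#include <string>
#include <vector>

#include "base/metrics/histogram_delta_serialization.h"

void SerializeForIpc(std::vector<std::string>* out) {
  base::HistogramDeltaSerialization serializer("ChildProcess");
  // Local histograms only; persistent ones may be reported elsewhere.
  serializer.PrepareAndSerializeDeltas(out, /*include_persistent=*/false);
}

void OnDeltasReceived(const std::vector<std::string>& deltas) {
  base::HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
}
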
diff --git a/base/metrics/histogram_flattener.h b/base/metrics/histogram_flattener.h
deleted file mode 100644
index 6a5e3f4..0000000
--- a/base/metrics/histogram_flattener.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_FLATTENER_H_
-#define BASE_METRICS_HISTOGRAM_FLATTENER_H_
-
-#include <map>
-#include <string>
-
-#include "base/macros.h"
-#include "base/metrics/histogram.h"
-
-namespace base {
-
-class HistogramSamples;
-
-// HistogramFlattener is an interface used by HistogramSnapshotManager, which
-// handles the logistics of gathering up available histograms for recording.
-class BASE_EXPORT HistogramFlattener {
- public:
-  virtual void RecordDelta(const HistogramBase& histogram,
-                           const HistogramSamples& snapshot) = 0;
-
- protected:
-  HistogramFlattener() = default;
-  virtual ~HistogramFlattener() = default;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(HistogramFlattener);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_HISTOGRAM_FLATTENER_H_
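
A minimal sketch of implementing the interface above: a flattener that simply logs each recorded delta. The class name is invented for the example, and it assumes base/metrics/histogram_samples.h provides HistogramSamples::TotalCount() (as used elsewhere in this change).

#include "base/logging.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_flattener.h"
#include "base/metrics/histogram_samples.h"

class LoggingFlattener : public base::HistogramFlattener {
 public:
  void RecordDelta(const base::HistogramBase& histogram,
                   const base::HistogramSamples& snapshot) override {
    DVLOG(1) << histogram.histogram_name()
             << " delta count: " << snapshot.TotalCount();
  }
};
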
diff --git a/base/metrics/histogram_functions.cc b/base/metrics/histogram_functions.cc
deleted file mode 100644
index 31bf219..0000000
--- a/base/metrics/histogram_functions.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/histogram_functions.h"
-
-#include "base/metrics/histogram.h"
-#include "base/metrics/histogram_base.h"
-#include "base/metrics/sparse_histogram.h"
-#include "base/time/time.h"
-
-namespace base {
-
-void UmaHistogramBoolean(const std::string& name, bool sample) {
-  HistogramBase* histogram = BooleanHistogram::FactoryGet(
-      name, HistogramBase::kUmaTargetedHistogramFlag);
-  histogram->Add(sample);
-}
-
-void UmaHistogramExactLinear(const std::string& name,
-                             int sample,
-                             int value_max) {
-  HistogramBase* histogram =
-      LinearHistogram::FactoryGet(name, 1, value_max, value_max + 1,
-                                  HistogramBase::kUmaTargetedHistogramFlag);
-  histogram->Add(sample);
-}
-
-void UmaHistogramPercentage(const std::string& name, int percent) {
-  UmaHistogramExactLinear(name, percent, 100);
-}
-
-void UmaHistogramCustomCounts(const std::string& name,
-                              int sample,
-                              int min,
-                              int max,
-                              int buckets) {
-  HistogramBase* histogram = Histogram::FactoryGet(
-      name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
-  histogram->Add(sample);
-}
-
-void UmaHistogramCounts100(const std::string& name, int sample) {
-  UmaHistogramCustomCounts(name, sample, 1, 100, 50);
-}
-
-void UmaHistogramCounts1000(const std::string& name, int sample) {
-  UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
-}
-
-void UmaHistogramCounts10000(const std::string& name, int sample) {
-  UmaHistogramCustomCounts(name, sample, 1, 10000, 50);
-}
-
-void UmaHistogramCounts100000(const std::string& name, int sample) {
-  UmaHistogramCustomCounts(name, sample, 1, 100000, 50);
-}
-
-void UmaHistogramCounts1M(const std::string& name, int sample) {
-  UmaHistogramCustomCounts(name, sample, 1, 1000000, 50);
-}
-
-void UmaHistogramCounts10M(const std::string& name, int sample) {
-  UmaHistogramCustomCounts(name, sample, 1, 10000000, 50);
-}
-
-void UmaHistogramCustomTimes(const std::string& name,
-                             TimeDelta sample,
-                             TimeDelta min,
-                             TimeDelta max,
-                             int buckets) {
-  HistogramBase* histogram = Histogram::FactoryTimeGet(
-      name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
-  histogram->AddTimeMillisecondsGranularity(sample);
-}
-
-void UmaHistogramTimes(const std::string& name, TimeDelta sample) {
-  UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
-                          TimeDelta::FromSeconds(10), 50);
-}
-
-void UmaHistogramMediumTimes(const std::string& name, TimeDelta sample) {
-  UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
-                          TimeDelta::FromMinutes(3), 50);
-}
-
-void UmaHistogramLongTimes(const std::string& name, TimeDelta sample) {
-  UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
-                          TimeDelta::FromHours(1), 50);
-}
-
-void UmaHistogramMemoryKB(const std::string& name, int sample) {
-  UmaHistogramCustomCounts(name, sample, 1000, 500000, 50);
-}
-
-void UmaHistogramMemoryMB(const std::string& name, int sample) {
-  UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
-}
-
-void UmaHistogramMemoryLargeMB(const std::string& name, int sample) {
-  UmaHistogramCustomCounts(name, sample, 1, 64000, 100);
-}
-
-void UmaHistogramSparse(const std::string& name, int sample) {
-  HistogramBase* histogram = SparseHistogram::FactoryGet(
-      name, HistogramBase::kUmaTargetedHistogramFlag);
-  histogram->Add(sample);
-}
-
-}  // namespace base
diff --git a/base/metrics/histogram_functions.h b/base/metrics/histogram_functions.h
deleted file mode 100644
index 60c0057..0000000
--- a/base/metrics/histogram_functions.h
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
-#define BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
-
-#include "base/metrics/histogram.h"
-#include "base/metrics/histogram_base.h"
-#include "base/time/time.h"
-
-// Functions for recording metrics.
-//
-// For best practices on deciding when to emit to a histogram and what form
-// the histogram should take, see
-// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
-
-// Functions for recording UMA histograms. These can be used for cases
-// when the histogram name is generated at runtime. The functionality is
-// equivalent to macros defined in histogram_macros.h but allowing non-constant
-// histogram names. These functions are slower than their macro equivalents
-// because the histogram objects are not cached between calls, so they
-// shouldn't be used in performance-critical code.
-namespace base {
-
-// For histograms with linear buckets.
-// Used for capturing integer data with a linear bucketing scheme. This can be
-// used when you want the exact value of some small numeric count, with a max of
-// 100 or less. If you need to capture a range greater than 100, we recommend
-// the use of the COUNT histograms below.
-// Sample usage:
-//   base::UmaHistogramExactLinear("Histogram.Linear", some_value, 10);
-BASE_EXPORT void UmaHistogramExactLinear(const std::string& name,
-                                         int sample,
-                                         int value_max);
-
-// For adding a sample to an enumerated histogram.
-// Sample usage:
-//   // These values are persisted to logs. Entries should not be renumbered and
-//   // numeric values should never be reused.
-//   enum class MyEnum {
-//     FIRST_VALUE = 0,
-//     SECOND_VALUE = 1,
-//     ...
-//     FINAL_VALUE = N,
-//     COUNT
-//   };
-//   base::UmaHistogramEnumeration("My.Enumeration",
-//                                 MyEnum::SOME_VALUE, MyEnum::COUNT);
-//
-// Note: The value in |sample| must be strictly less than |enum_size|.
-template <typename T>
-void UmaHistogramEnumeration(const std::string& name, T sample, T enum_size) {
-  static_assert(std::is_enum<T>::value,
-                "Non enum passed to UmaHistogramEnumeration");
-  DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
-  DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
-  return UmaHistogramExactLinear(name, static_cast<int>(sample),
-                                 static_cast<int>(enum_size));
-}
-
-// Same as above, but uses T::kMaxValue as the inclusive maximum value of the
-// enum.
-template <typename T>
-void UmaHistogramEnumeration(const std::string& name, T sample) {
-  static_assert(std::is_enum<T>::value,
-                "Non enum passed to UmaHistogramEnumeration");
-  DCHECK_LE(static_cast<uintmax_t>(T::kMaxValue),
-            static_cast<uintmax_t>(INT_MAX) - 1);
-  DCHECK_LE(static_cast<uintmax_t>(sample),
-            static_cast<uintmax_t>(T::kMaxValue));
-  return UmaHistogramExactLinear(name, static_cast<int>(sample),
-                                 static_cast<int>(T::kMaxValue) + 1);
-}
-
-// For adding a boolean sample to a histogram.
-// Sample usage:
-//   base::UmaHistogramBoolean("My.Boolean", true)
-BASE_EXPORT void UmaHistogramBoolean(const std::string& name, bool sample);
-
-// For adding a percentage histogram.
-// Percentages are integers between 1 and 100.
-// Sample usage:
-//   base::UmaHistogramPercentage("My.Percent", 69)
-BASE_EXPORT void UmaHistogramPercentage(const std::string& name, int percent);
-
-// For adding a counts histogram.
-// Sample usage:
-//   base::UmaHistogramCustomCounts("My.Counts", some_value, 1, 600, 30)
-BASE_EXPORT void UmaHistogramCustomCounts(const std::string& name,
-                                          int sample,
-                                          int min,
-                                          int max,
-                                          int buckets);
-
-// Counts specialization for maximum counts 100, 1000, 10k, 100k, 1M and 10M.
-BASE_EXPORT void UmaHistogramCounts100(const std::string& name, int sample);
-BASE_EXPORT void UmaHistogramCounts1000(const std::string& name, int sample);
-BASE_EXPORT void UmaHistogramCounts10000(const std::string& name, int sample);
-BASE_EXPORT void UmaHistogramCounts100000(const std::string& name, int sample);
-BASE_EXPORT void UmaHistogramCounts1M(const std::string& name, int sample);
-BASE_EXPORT void UmaHistogramCounts10M(const std::string& name, int sample);
-
-// For histograms storing times.
-BASE_EXPORT void UmaHistogramCustomTimes(const std::string& name,
-                                         TimeDelta sample,
-                                         TimeDelta min,
-                                         TimeDelta max,
-                                         int buckets);
-// For short timings from 1 ms up to 10 seconds (50 buckets).
-BASE_EXPORT void UmaHistogramTimes(const std::string& name, TimeDelta sample);
-// For medium timings up to 3 minutes (50 buckets).
-BASE_EXPORT void UmaHistogramMediumTimes(const std::string& name,
-                                         TimeDelta sample);
-// For time intervals up to 1 hr (50 buckets).
-BASE_EXPORT void UmaHistogramLongTimes(const std::string& name,
-                                       TimeDelta sample);
-
-// For recording memory related histograms.
-// Used to measure common KB-granularity memory stats. Range is up to 500M.
-BASE_EXPORT void UmaHistogramMemoryKB(const std::string& name, int sample);
-// Used to measure common MB-granularity memory stats. Range is up to ~1G.
-BASE_EXPORT void UmaHistogramMemoryMB(const std::string& name, int sample);
-// Used to measure common MB-granularity memory stats. Range is up to ~64G.
-BASE_EXPORT void UmaHistogramMemoryLargeMB(const std::string& name, int sample);
-
-// For recording sparse histograms.
-// The |sample| can be a negative or non-negative number.
-//
-// Sparse histograms are well suited for recording counts of exact sample values
-// that are sparsely distributed over a relatively large range, in cases where
-// ultra-fast performance is not critical. For instance, Sqlite.Version.* are
-// sparse because for any given database, there's going to be exactly one
-// version logged.
-//
-// Performance:
-// ------------
-// Sparse histograms are typically more memory-efficient but less time-efficient
-// than other histograms. Essentially, sparse histograms use a map rather
-// than a vector for their backing storage; they also require lock acquisition
-// to increment a sample, whereas other histograms do not. Hence, each increment
-// operation is a bit slower than for other histograms. But, if the data is
-// sparse, then they use less memory client-side, because they allocate buckets
-// on demand rather than preallocating.
-//
-// Data size:
-// ----------
-// Note that server-side, we still need to load all buckets, across all users,
-// at once. Thus, please avoid exploding such histograms, i.e. uploading
-// many distinct values to the server (across all users). Concretely, keep the
-// number of distinct values <= 100 ideally, definitely <= 1000. If you have no
-// guarantees on the range of your data, use clamping, e.g.:
-//   UmaHistogramSparse("MyHistogram", ClampToRange(value, 0, 200));
-BASE_EXPORT void UmaHistogramSparse(const std::string& name, int sample);
-
-}  // namespace base
-
-#endif  // BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
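
A usage sketch for the functions above when histogram names are assembled at runtime, which is exactly the case they exist for (the names, suffix, and values here are illustrative):

#include <string>

#include "base/metrics/histogram_functions.h"
#include "base/time/time.h"

void RecordLoad(const std::string& suffix,
                bool success,
                base::TimeDelta elapsed) {
  // Runtime-generated names rule out the macros in histogram_macros.h.
  base::UmaHistogramBoolean("MyFeature.Load.Success." + suffix, success);
  base::UmaHistogramTimes("MyFeature.Load.Time." + suffix, elapsed);
}
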
diff --git a/base/metrics/histogram_macros.h b/base/metrics/histogram_macros.h
deleted file mode 100644
index 0960b19..0000000
--- a/base/metrics/histogram_macros.h
+++ /dev/null
@@ -1,359 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_MACROS_H_
-#define BASE_METRICS_HISTOGRAM_MACROS_H_
-
-#include "base/macros.h"
-#include "base/metrics/histogram.h"
-#include "base/metrics/histogram_macros_internal.h"
-#include "base/metrics/histogram_macros_local.h"
-#include "base/time/time.h"
-
-
-// Macros for efficient use of histograms.
-//
-// For best practices on deciding when to emit to a histogram and what form
-// the histogram should take, see
-// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
-
-// TODO(rkaplow): Link to proper documentation on metric creation once we have
-// it in a good state.
-
-// All of these macros must be called with |name| as a runtime constant - it
-// doesn't have to literally be a constant, but it must be the same string on
-// all calls from a particular call site. If this rule is violated, it is
-// possible the data will be written to the wrong histogram.
-
-//------------------------------------------------------------------------------
-// Enumeration histograms.
-
-// These macros create histograms for enumerated data. Ideally, the data should
-// be of the form "event occurs, log the result". We recommend not putting
-// related but not directly connected data as enums within the same histogram.
-// You should be defining an associated Enum, and the input sample should be
-// an element of the Enum.
-// All of these macros must be called with |name| as a runtime constant.
-
-// The first variant of UMA_HISTOGRAM_ENUMERATION accepts two arguments: the
-// histogram name and the enum sample. It deduces the correct boundary value to
-// use by looking for an enumerator with the name kMaxValue. kMaxValue should
-// share the value of the highest enumerator: this avoids switch statements
-// having to handle a sentinel no-op value.
-//
-// Sample usage:
-//   // These values are persisted to logs. Entries should not be renumbered and
-//   // numeric values should never be reused.
-//   enum class MyEnum {
-//     kFirstValue = 0,
-//     kSecondValue = 1,
-//     ...
-//     kFinalValue = N,
-//     kMaxValue = kFinalValue,
-//   };
-//   UMA_HISTOGRAM_ENUMERATION("My.Enumeration", MyEnum::kSomeValue);
-//
-// The second variant requires three arguments: the first two are the same as
-// before, and the third argument is the enum boundary: this must be strictly
-// greater than any other enumerator that will be sampled.
-//
-// Sample usage:
-//   // These values are persisted to logs. Entries should not be renumbered and
-//   // numeric values should never be reused.
-//   enum class MyEnum {
-//     FIRST_VALUE = 0,
-//     SECOND_VALUE = 1,
-//     ...
-//     FINAL_VALUE = N,
-//     COUNT
-//   };
-//   UMA_HISTOGRAM_ENUMERATION("My.Enumeration",
-//                             MyEnum::SOME_VALUE, MyEnum::COUNT);
-//
-// Note: If the enum is used in a switch, it is often desirable to avoid writing
-// a case statement to handle an unused sentinel value (i.e. COUNT in the above
-// example). For scoped enums, this is awkward since it requires casting the
-// enum to an arithmetic type and adding one. Instead, prefer the two argument
-// version of the macro which automatically deduces the boundary from kMaxValue.
-#define UMA_HISTOGRAM_ENUMERATION(name, ...)                            \
-  CR_EXPAND_ARG(INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(           \
-      __VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
-      INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY)(              \
-      name, __VA_ARGS__, base::HistogramBase::kUmaTargetedHistogramFlag))
-
-// Histogram for boolean values.
-
-// Sample usage:
-//   UMA_HISTOGRAM_BOOLEAN("Histogram.Boolean", bool);
-#define UMA_HISTOGRAM_BOOLEAN(name, sample)                                    \
-    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample),                   \
-        base::BooleanHistogram::FactoryGet(name,                               \
-            base::HistogramBase::kUmaTargetedHistogramFlag))
-
-//------------------------------------------------------------------------------
-// Linear histograms.
-
-// All of these macros must be called with |name| as a runtime constant.
-
-// Used for capturing integer data with a linear bucketing scheme. This can be
-// used when you want the exact value of some small numeric count, with a max of
-// 100 or less. If you need to capture a range greater than 100, we recommend
-// the use of the COUNT histograms below.
-
-// Sample usage:
-//   UMA_HISTOGRAM_EXACT_LINEAR("Histogram.Linear", count, 10);
-#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
-  INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(                \
-      name, sample, value_max, base::HistogramBase::kUmaTargetedHistogramFlag)
-
-// Used for capturing basic percentages. This will be 100 buckets of size 1.
-
-// Sample usage:
-//   UMA_HISTOGRAM_PERCENTAGE("Histogram.Percent", percent_as_int);
-#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
-  UMA_HISTOGRAM_EXACT_LINEAR(name, percent_as_int, 101)
-
-//------------------------------------------------------------------------------
-// Count histograms. These are used for collecting numeric data. Note that we
-// have macros for more specialized use cases below (memory, time, percentages).
-
-// The number suffixes here refer to the max size of the sample, i.e. COUNT_1000
-// will be able to collect samples of counts up to 1000. The default number of
-// buckets in all default macros is 50. We recommend erring on the side of too
-// large a range versus too short a range.
-// These macros default to exponential histograms - i.e. the lengths of the
-// bucket ranges exponentially increase as the sample range increases.
-// These should *not* be used if you are interested in exact counts, i.e. a
-// bucket range of 1. In these cases, you should use the ENUMERATION macros
-// defined later. These should also not be used to capture the number of some
-// event, e.g. "button X was clicked N times". In such cases, an enum should be
-// used, ideally with an appropriate baseline enum entry included.
-// All of these macros must be called with |name| as a runtime constant.
-
-// Sample usage:
-//   UMA_HISTOGRAM_COUNTS_1M("My.Histogram", sample);
-
-#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(    \
-    name, sample, 1, 100, 50)
-
-#define UMA_HISTOGRAM_COUNTS_1000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(   \
-    name, sample, 1, 1000, 50)
-
-#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(  \
-    name, sample, 1, 10000, 50)
-
-#define UMA_HISTOGRAM_COUNTS_100000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
-    name, sample, 1, 100000, 50)
-
-#define UMA_HISTOGRAM_COUNTS_1M(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(     \
-    name, sample, 1, 1000000, 50)
-
-#define UMA_HISTOGRAM_COUNTS_10M(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(    \
-    name, sample, 1, 10000000, 50)
-
-// This can be used when the default ranges are not sufficient. This macro lets
-// the metric developer customize the min and max of the sampled range, as well
-// as the number of buckets recorded.
-// Any data outside the range here will be put in underflow and overflow
-// buckets. Min values should be >=1 as emitted 0s will still go into the
-// underflow bucket.
-
-// Sample usage:
-//   UMA_HISTOGRAM_CUSTOM_COUNTS("My.Histogram", 1, 100000000, 100);
-#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count)      \
-    INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(                                \
-        name, sample, min, max, bucket_count,                                  \
-        base::HistogramBase::kUmaTargetedHistogramFlag)
-
-//------------------------------------------------------------------------------
-// Timing histograms. These are used for collecting timing data (generally
-// latencies).
-
-// These macros create exponentially sized histograms (lengths of the bucket
-// ranges exponentially increase as the sample range increases). The input
-// sample is a base::TimeDelta. The output data is measured in ms granularity.
-// All of these macros must be called with |name| as a runtime constant.
-
-// Sample usage:
-//   UMA_HISTOGRAM_TIMES("My.Timing.Histogram", time_delta);
-
-// Short timings - up to 10 seconds. For high-resolution (microseconds) timings,
-// see UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES.
-#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(          \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromSeconds(10), 50)
-
-// Medium timings - up to 3 minutes. Note this starts at 10ms (no good reason,
-// but not worth changing).
-#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(   \
-    name, sample, base::TimeDelta::FromMilliseconds(10),                       \
-    base::TimeDelta::FromMinutes(3), 50)
-
-// Long timings - up to an hour.
-#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(     \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromHours(1), 50)
-
-// Long timings with higher granularity - up to an hour with 100 buckets.
-#define UMA_HISTOGRAM_LONG_TIMES_100(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromHours(1), 100)
-
-// This can be used when the default ranges are not sufficient. This macro lets
-// the metric developer customize the min and max of the sampled range, as well
-// as the number of buckets recorded.
-
-// Sample usage:
-//   UMA_HISTOGRAM_CUSTOM_TIMES("Very.Long.Timing.Histogram", time_delta,
-//       base::TimeDelta::FromSeconds(1), base::TimeDelta::FromDays(1), 100);
-#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
-  STATIC_HISTOGRAM_POINTER_BLOCK(                                        \
-      name, AddTimeMillisecondsGranularity(sample),                      \
-      base::Histogram::FactoryTimeGet(                                   \
-          name, min, max, bucket_count,                                  \
-          base::HistogramBase::kUmaTargetedHistogramFlag))
-
-// Same as UMA_HISTOGRAM_CUSTOM_TIMES but reports |sample| in microseconds,
-// dropping the report if this client doesn't have a high-resolution clock.
-//
-// Note: dropping reports on clients with low-resolution clocks means these
-// reports will be biased to a portion of the population on Windows. See
-// Windows.HasHighResolutionTimeTicks for the affected sample.
-//
-// Sample usage:
-//  UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
-//      "High.Resolution.TimingMicroseconds.Histogram", time_delta,
-//      base::TimeDelta::FromMicroseconds(1),
-//      base::TimeDelta::FromMilliseconds(10), 100);
-#define UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(name, sample, min, max, \
-                                                bucket_count)           \
-  STATIC_HISTOGRAM_POINTER_BLOCK(                                       \
-      name, AddTimeMicrosecondsGranularity(sample),                     \
-      base::Histogram::FactoryMicrosecondsTimeGet(                      \
-          name, min, max, bucket_count,                                 \
-          base::HistogramBase::kUmaTargetedHistogramFlag))
-
-// Scoped class which logs its time on this earth as a UMA statistic. This is
-// recommended for when you want a histogram which measures the time it takes
-// for a method to execute. This measures up to 10 seconds. It uses
-// UMA_HISTOGRAM_TIMES under the hood.
-
-// Sample usage:
-//   void Function() {
-//     SCOPED_UMA_HISTOGRAM_TIMER("Component.FunctionTime");
-//     ...
-//   }
-#define SCOPED_UMA_HISTOGRAM_TIMER(name)                                       \
-  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, false, __COUNTER__)
-
-// Similar scoped histogram timer, but this uses UMA_HISTOGRAM_LONG_TIMES_100,
-// which measures up to an hour, and uses 100 buckets. This is more expensive
-// to store, so only use if this often takes >10 seconds.
-#define SCOPED_UMA_HISTOGRAM_LONG_TIMER(name)                                  \
-  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, true, __COUNTER__)
-
-
-//------------------------------------------------------------------------------
-// Memory histograms.
-
-// These macros create exponentially sized histograms (lengths of the bucket
-// ranges exponentially increase as the sample range increases). The input
-// sample must be a number measured in kilobytes.
-// All of these macros must be called with |name| as a runtime constant.
-
-// Sample usage:
-//   UMA_HISTOGRAM_MEMORY_KB("My.Memory.Histogram", memory_in_kb);
-
-// Used to measure common KB-granularity memory stats. Range is up to 500000KB -
-// approximately 500M.
-#define UMA_HISTOGRAM_MEMORY_KB(name, sample)                                  \
-    UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1000, 500000, 50)
-
-// Used to measure common MB-granularity memory stats. Range is up to ~64G.
-#define UMA_HISTOGRAM_MEMORY_LARGE_MB(name, sample)                            \
-    UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 64000, 100)
-
-
-//------------------------------------------------------------------------------
-// Stability-specific histograms.
-
-// Histograms logged in as stability histograms will be included in the initial
-// stability log. See comments by declaration of
-// MetricsService::PrepareInitialStabilityLog().
-// All of these macros must be called with |name| as a runtime constant.
-
-// For details on usage, see the documentation on the non-stability equivalents.
-
-#define UMA_STABILITY_HISTOGRAM_COUNTS_100(name, sample)                       \
-    UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
-
-#define UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max,          \
-                                              bucket_count)                    \
-    INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(                                \
-        name, sample, min, max, bucket_count,                                  \
-        base::HistogramBase::kUmaStabilityHistogramFlag)
-
-#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, sample, enum_max)            \
-    INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(                                  \
-        name, sample, enum_max,                                                \
-        base::HistogramBase::kUmaStabilityHistogramFlag)
-
-//------------------------------------------------------------------------------
-// Histogram instantiation helpers.
-
-// Support a collection of histograms, perhaps one for each entry in an
-// enumeration. This macro manages a block of pointers, adding to a specific
-// one by its index.
-//
-// A typical instantiation looks something like this:
-//  STATIC_HISTOGRAM_POINTER_GROUP(
-//      GetHistogramNameForIndex(histogram_index),
-//      histogram_index, MAXIMUM_HISTOGRAM_INDEX, Add(some_delta),
-//      base::Histogram::FactoryGet(
-//          GetHistogramNameForIndex(histogram_index),
-//          MINIMUM_SAMPLE, MAXIMUM_SAMPLE, BUCKET_COUNT,
-//          base::HistogramBase::kUmaTargetedHistogramFlag));
-//
-// Though it seems inefficient to generate the name twice, the first
-// instance will be used only for DCHECK builds and the second will
-// execute only during the first access to the given index, after which
-// the pointer is cached and the name never needed again.
-#define STATIC_HISTOGRAM_POINTER_GROUP(constant_histogram_name, index,        \
-                                       constant_maximum,                      \
-                                       histogram_add_method_invocation,       \
-                                       histogram_factory_get_invocation)      \
-  do {                                                                        \
-    static base::subtle::AtomicWord atomic_histograms[constant_maximum];      \
-    DCHECK_LE(0, index);                                                      \
-    DCHECK_LT(index, constant_maximum);                                       \
-    HISTOGRAM_POINTER_USE(&atomic_histograms[index], constant_histogram_name, \
-                          histogram_add_method_invocation,                    \
-                          histogram_factory_get_invocation);                  \
-  } while (0)
-
-//------------------------------------------------------------------------------
-// Deprecated histogram macros. Not recommended for current use.
-
-// Legacy name for UMA_HISTOGRAM_COUNTS_1M. Suggest using explicit naming
-// and not using this macro going forward.
-#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(        \
-    name, sample, 1, 1000000, 50)
-
-// MB-granularity memory metric. This has a short max (1G).
-#define UMA_HISTOGRAM_MEMORY_MB(name, sample)                                  \
-    UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000, 50)
-
-// For an enum with customized range. In general, sparse histograms should be
-// used instead.
-// Samples should be one of the std::vector<int> list provided via
-// |custom_ranges|. See comments above CustomRanges::FactoryGet about the
-// requirement of |custom_ranges|. You can use the helper function
-// CustomHistogram::ArrayToCustomEnumRanges to transform a C-style array of
-// valid sample values to a std::vector<int>.
-#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges)          \
-    STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample),                          \
-        base::CustomHistogram::FactoryGet(name, custom_ranges,                 \
-            base::HistogramBase::kUmaTargetedHistogramFlag))
-
-#endif  // BASE_METRICS_HISTOGRAM_MACROS_H_
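
An illustrative use of the macros above (the enum and histogram names are invented for the example): the two-argument UMA_HISTOGRAM_ENUMERATION deduces its boundary from kMaxValue, and the scoped timer records the function's elapsed time when it goes out of scope.

#include "base/metrics/histogram_macros.h"

// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
enum class FetchResult {
  kSuccess = 0,
  kNetworkError = 1,
  kCancelled = 2,
  kMaxValue = kCancelled,
};

void ReportFetch(FetchResult result) {
  SCOPED_UMA_HISTOGRAM_TIMER("MyFeature.Fetch.ReportTime");
  UMA_HISTOGRAM_ENUMERATION("MyFeature.Fetch.Result", result);
}
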
diff --git a/base/metrics/histogram_macros_internal.h b/base/metrics/histogram_macros_internal.h
deleted file mode 100644
index ff3702b..0000000
--- a/base/metrics/histogram_macros_internal.h
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
-#define BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
-
-#include <stdint.h>
-
-#include <limits>
-#include <type_traits>
-
-#include "base/atomicops.h"
-#include "base/logging.h"
-#include "base/metrics/histogram.h"
-#include "base/metrics/sparse_histogram.h"
-#include "base/time/time.h"
-
-// This is for macros and helpers internal to base/metrics. They should not be
-// used outside of this directory. For writing to UMA histograms, see
-// histogram_macros.h.
-
-namespace base {
-namespace internal {
-
-// Helper traits for deducing the boundary value for enums.
-template <typename Enum, typename SFINAE = void>
-struct EnumSizeTraits {
-  static constexpr Enum Count() {
-    static_assert(
-        sizeof(Enum) == 0,
-        "enumerator must define kMaxValue enumerator to use this macro!");
-    return Enum();
-  }
-};
-
-// Since the UMA histogram macros expect a value one larger than the max defined
-// enumerator value, add one.
-template <typename Enum>
-struct EnumSizeTraits<
-    Enum,
-    std::enable_if_t<std::is_enum<decltype(Enum::kMaxValue)>::value>> {
-  static constexpr Enum Count() {
-    return static_cast<Enum>(
-        static_cast<std::underlying_type_t<Enum>>(Enum::kMaxValue) + 1);
-  }
-};
-
-}  // namespace internal
-}  // namespace base
-
-// TODO(rkaplow): Improve commenting of these methods.
-//------------------------------------------------------------------------------
-// Histograms are often put in areas where they are called many, many times, and
-// performance is critical.  As a result, they are designed to have a very low
-// recurring cost of executing (adding additional samples). Toward that end,
-// the macros declare a static pointer to the histogram in question, and only
-// take a "slow path" to construct (or find) the histogram on the first run
-// through the macro. We leak the histograms at shutdown time so that we don't
-// have to validate using the pointers at any time during the running of the
-// process.
-
-// In some cases (integration into 3rd party code), it's useful to separate the
-// definition of |atomic_histogram_pointer| from its use. To achieve this we
-// define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
-// STATIC_HISTOGRAM_POINTER_BLOCK, which defines an |atomic_histogram_pointer|
-// and forwards to HISTOGRAM_POINTER_USE.
-#define HISTOGRAM_POINTER_USE(atomic_histogram_pointer,                        \
-                              constant_histogram_name,                         \
-                              histogram_add_method_invocation,                 \
-                              histogram_factory_get_invocation)                \
-  do {                                                                         \
-    /*                                                                         \
-     * Acquire_Load() ensures that we acquire visibility to the                \
-     * pointed-to data in the histogram.                                       \
-     */                                                                        \
-    base::HistogramBase* histogram_pointer(                                    \
-        reinterpret_cast<base::HistogramBase*>(                                \
-            base::subtle::Acquire_Load(atomic_histogram_pointer)));            \
-    if (!histogram_pointer) {                                                  \
-      /*                                                                       \
-       * This is the slow path, which will construct OR find the               \
-       * matching histogram.  histogram_factory_get_invocation includes        \
-       * locks on a global histogram name map and is completely thread         \
-       * safe.                                                                 \
-       */                                                                      \
-      histogram_pointer = histogram_factory_get_invocation;                    \
-                                                                               \
-      /*                                                                       \
-       * Use Release_Store to ensure that the histogram data is made           \
-       * available globally before we make the pointer visible. Several        \
-       * threads may perform this store, but the same value will be            \
-       * stored in all cases (for a given named/spec'ed histogram).            \
-       * We could do this without any barrier, since FactoryGet entered        \
-       * and exited a lock after construction, but this barrier makes          \
-       * things clear.                                                         \
-       */                                                                      \
-      base::subtle::Release_Store(                                             \
-          atomic_histogram_pointer,                                            \
-          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));      \
-    }                                                                          \
-    if (DCHECK_IS_ON())                                                        \
-      histogram_pointer->CheckName(constant_histogram_name);                   \
-    histogram_pointer->histogram_add_method_invocation;                        \
-  } while (0)
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-// Defines the static |atomic_histogram_pointer| and forwards to
-// HISTOGRAM_POINTER_USE.
-#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name,                \
-                                       histogram_add_method_invocation,        \
-                                       histogram_factory_get_invocation)       \
-  do {                                                                         \
-    /*                                                                         \
-     * The pointer's presence indicates that the initialization is complete.   \
-     * Initialization is idempotent, so it can safely be atomically repeated.  \
-     */                                                                        \
-    static base::subtle::AtomicWord atomic_histogram_pointer = 0;              \
-    HISTOGRAM_POINTER_USE(&atomic_histogram_pointer, constant_histogram_name,  \
-                          histogram_add_method_invocation,                     \
-                          histogram_factory_get_invocation);                   \
-  } while (0)
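
// The same cached-pointer pattern as the macros above, sketched with
// std::atomic instead of base::subtle (stand-in types, not part of the
// original file): the first caller takes the slow factory path, and later
// callers perform a single acquire load.
#include <atomic>

struct Histogram {};                      // Stand-in type for the sketch.

Histogram* FactoryGetSlow(const char*) {  // Stand-in thread-safe factory.
  static Histogram instance;
  return &instance;
}

Histogram* CachedLookup(std::atomic<Histogram*>& cache, const char* name) {
  Histogram* h = cache.load(std::memory_order_acquire);
  if (!h) {
    // Several threads may race here; each stores the same pointer value.
    h = FactoryGetSlow(name);
    cache.store(h, std::memory_order_release);
  }
  return h;
}
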
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-#define INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(name, sample, min, max,     \
-                                                   bucket_count, flag)         \
-    STATIC_HISTOGRAM_POINTER_BLOCK(                                            \
-        name, Add(sample),                                                     \
-        base::Histogram::FactoryGet(name, min, max, bucket_count, flag))
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-// The bucketing scheme is linear with a bucket size of 1. For N items,
-// recording values in the range [0, N - 1] creates a linear histogram with N +
-// 1 buckets:
-//   [0, 1), [1, 2), ..., [N - 1, N)
-// and an overflow bucket [N, infinity).
-//
-// Code should never emit to the overflow bucket; only to the other N buckets.
-// This allows future versions of Chrome to safely increase the boundary size.
-// Otherwise, the histogram would have [N - 1, infinity) as its overflow bucket,
-// and so the maximal value (N - 1) would be emitted to this overflow bucket.
-// But, if an additional value were later added, the bucket label for
-// the value (N - 1) would change to [N - 1, N), which would result in different
-// versions of Chrome using different bucket labels for identical data.
-#define INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(name, sample, boundary,  \
-                                                  flag)                    \
-  do {                                                                     \
-    static_assert(!std::is_enum<decltype(sample)>::value,                  \
-                  "|sample| should not be an enum type!");                 \
-    static_assert(!std::is_enum<decltype(boundary)>::value,                \
-                  "|boundary| should not be an enum type!");               \
-    STATIC_HISTOGRAM_POINTER_BLOCK(                                        \
-        name, Add(sample),                                                 \
-        base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
-                                          flag));                          \
-  } while (0)
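A minimal illustration of the "exact linear" bucket layout described above; the function name is invented and this is not part of the macro:

#include <cstddef>

// For an "exact linear" histogram with boundary N, a sample s in [0, N - 1]
// lands in bucket [s, s + 1), and anything >= N lands in the overflow bucket
// [N, infinity) -- which well-behaved callers never hit.
size_t ExactLinearBucket(int sample, int boundary) {
  if (sample < 0)
    return 0;  // Illustrative clamp only.
  if (sample >= boundary)
    return static_cast<size_t>(boundary);  // Overflow bucket.
  return static_cast<size_t>(sample);
}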
-
-// Helper for 'overloading' UMA_HISTOGRAM_ENUMERATION with a variable number of
-// arguments.
-#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(_1, _2, NAME, ...) NAME
-
-#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY(name, sample,       \
-                                                           flags)              \
-  INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(                                    \
-      name, sample, base::internal::EnumSizeTraits<decltype(sample)>::Count(), \
-      flags)
-
-// Note: The value in |sample| must be strictly less than |enum_size|.
-#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY(name, sample,     \
-                                                            enum_size, flags) \
-  INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, enum_size, flags)
-
-// Similar to the previous macro but intended for enumerations. This delegates
-// the work to the previous macro, but supports scoped enumerations as well by
-// forcing an explicit cast to the HistogramBase::Sample integral type.
-//
-// Note the range checks verify two separate issues:
-// - that the declared enum size isn't out of range of HistogramBase::Sample
-// - that the declared enum size is > 0
-//
-// TODO(dcheng): This should assert that the passed in types are actually enum
-// types.
-#define INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
-  do {                                                                         \
-    using decayed_sample = std::decay<decltype(sample)>::type;                 \
-    using decayed_boundary = std::decay<decltype(boundary)>::type;             \
-    static_assert(!std::is_enum<decayed_boundary>::value ||                    \
-                      std::is_enum<decayed_sample>::value,                     \
-                  "Unexpected: |boundary| is enum, but |sample| is not.");     \
-    static_assert(!std::is_enum<decayed_sample>::value ||                      \
-                      !std::is_enum<decayed_boundary>::value ||                \
-                      std::is_same<decayed_sample, decayed_boundary>::value,   \
-                  "|sample| and |boundary| shouldn't be of different enums");  \
-    static_assert(                                                             \
-        static_cast<uintmax_t>(boundary) <                                     \
-            static_cast<uintmax_t>(                                            \
-                std::numeric_limits<base::HistogramBase::Sample>::max()),      \
-        "|boundary| is out of range of HistogramBase::Sample");                \
-    INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(                                 \
-        name, static_cast<base::HistogramBase::Sample>(sample),                \
-        static_cast<base::HistogramBase::Sample>(boundary), flag);             \
-  } while (0)
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-// This is necessary to expand __COUNTER__ to an actual value.
-#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, is_long, key)       \
-  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)         \
-  class ScopedHistogramTimer##key {                                            \
-   public:                                                                     \
-    ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {}      \
-    ~ScopedHistogramTimer##key() {                                             \
-      base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_;         \
-      if (is_long) {                                                           \
-        UMA_HISTOGRAM_LONG_TIMES_100(name, elapsed);                           \
-      } else {                                                                 \
-        UMA_HISTOGRAM_TIMES(name, elapsed);                                    \
-      }                                                                        \
-    }                                                                          \
-   private:                                                                    \
-    base::TimeTicks constructed_;                                              \
-  } scoped_histogram_timer_##key
-
-#endif  // BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
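For reference, a minimal sketch of the pointer-caching pattern that STATIC_HISTOGRAM_POINTER_BLOCK implements, written with std::atomic instead of base::subtle; Histogram and FindOrCreateHistogram are stand-ins, not the real base APIs:

#include <atomic>
#include <cstdio>

struct Histogram {
  void Add(int sample) { std::printf("sample=%d\n", sample); }
};

// Stand-in for the FactoryGet() call: idempotent, always returns the same
// object for a given name (the name is ignored here for brevity).
Histogram* FindOrCreateHistogram(const char*) {
  static Histogram h;
  return &h;
}

void RecordSample(int sample) {
  // The pointer's presence marks initialization as complete; because the
  // factory is idempotent, a racy double-initialization is harmless.
  static std::atomic<Histogram*> cached{nullptr};
  Histogram* h = cached.load(std::memory_order_acquire);
  if (!h) {
    h = FindOrCreateHistogram("My.Metric");
    cached.store(h, std::memory_order_release);
  }
  h->Add(sample);
}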
diff --git a/base/metrics/histogram_macros_local.h b/base/metrics/histogram_macros_local.h
deleted file mode 100644
index c4d333b..0000000
--- a/base/metrics/histogram_macros_local.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
-#define BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
-
-#include "base/logging.h"
-#include "base/metrics/histogram.h"
-#include "base/metrics/histogram_macros_internal.h"
-#include "base/time/time.h"
-
-// TODO(rkaplow): Migrate all LOCAL_* usage within Chromium to include this
-// file instead of the histogram_macros.h file.
-
-//------------------------------------------------------------------------------
-// Enumeration histograms.
-//
-// For usage details, see the equivalents in histogram_macros.h.
-
-#define LOCAL_HISTOGRAM_ENUMERATION(name, ...)                          \
-  CR_EXPAND_ARG(INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(           \
-      __VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
-      INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY)(              \
-      name, __VA_ARGS__, base::HistogramBase::kNoFlags))
-
-#define LOCAL_HISTOGRAM_BOOLEAN(name, sample)                                  \
-    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample),                   \
-        base::BooleanHistogram::FactoryGet(name, base::Histogram::kNoFlags))
-
-//------------------------------------------------------------------------------
-// Percentage histograms.
-//
-// For usage details, see the equivalents in histogram_macros.h
-
-#define LOCAL_HISTOGRAM_PERCENTAGE(name, under_one_hundred)                    \
-    LOCAL_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
-
-//------------------------------------------------------------------------------
-// Count histograms. These are used for collecting numeric data. Note that we
-// have macros for more specialized use cases below (memory, time, percentages).
-// For usage details, see the equivalents in histogram_macros.h.
-
-#define LOCAL_HISTOGRAM_COUNTS_100(name, sample)                               \
-    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
-
-#define LOCAL_HISTOGRAM_COUNTS_10000(name, sample)                             \
-    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
-
-#define LOCAL_HISTOGRAM_COUNTS_1000000(name, sample)                           \
-    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000000, 50)
-
-#define LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count)    \
-    INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(                                \
-        name, sample, min, max, bucket_count, base::HistogramBase::kNoFlags)
-
-//------------------------------------------------------------------------------
-// Timing histograms. These are used for collecting timing data (generally
-// latencies).
-//
-// For usage details, see the equivalents in histogram_macros.h.
-
-#define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES(      \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromSeconds(10), 50)
-
-#define LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
-  STATIC_HISTOGRAM_POINTER_BLOCK(                                          \
-      name, AddTimeMillisecondsGranularity(sample),                        \
-      base::Histogram::FactoryTimeGet(name, min, max, bucket_count,        \
-                                      base::HistogramBase::kNoFlags))
-
-//------------------------------------------------------------------------------
-// Memory histograms.
-//
-// For usage details, see the equivalents in histogram_macros.h.
-
-#define LOCAL_HISTOGRAM_MEMORY_KB(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
-    name, sample, 1000, 500000, 50)
-
-//------------------------------------------------------------------------------
-// Deprecated histograms. Not recommended for current use.
-
-// TODO(rkaplow): See if we can clean up this macro and usage.
-// Legacy non-explicit version. We suggest using LOCAL_HISTOGRAM_COUNTS_1000000
-// instead.
-#define LOCAL_HISTOGRAM_COUNTS(name, sample)                                   \
-    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000000, 50)
-
-#endif  // BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
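Hypothetical call sites for the macros above; the metric names and the surrounding function are invented for illustration:

#include "base/metrics/histogram_macros_local.h"
#include "base/time/time.h"

void OnQueryFinished(base::TimeDelta latency, int rows, bool cache_hit) {
  LOCAL_HISTOGRAM_TIMES("Db.QueryLatency", latency);
  LOCAL_HISTOGRAM_COUNTS_10000("Db.RowsReturned", rows);
  LOCAL_HISTOGRAM_BOOLEAN("Db.CacheHit", cache_hit);
}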
diff --git a/base/metrics/histogram_samples.cc b/base/metrics/histogram_samples.cc
deleted file mode 100644
index 6830637..0000000
--- a/base/metrics/histogram_samples.cc
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/histogram_samples.h"
-
-#include <limits>
-
-#include "base/compiler_specific.h"
-#include "base/metrics/histogram_functions.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/numerics/safe_math.h"
-#include "base/pickle.h"
-
-namespace base {
-
-namespace {
-
-// A shorthand constant for the max value of size_t.
-constexpr size_t kSizeMax = std::numeric_limits<size_t>::max();
-
-// A constant stored in an AtomicSingleSample (as_atomic) to indicate that the
-// sample is "disabled" and no further accumulation should be done with it. The
-// value is chosen such that it will be MAX_UINT16 for both |bucket| & |count|,
-// and thus less likely to conflict with real use. Conflicts are explicitly
-// handled in the code but it's worth making them as unlikely as possible.
-constexpr int32_t kDisabledSingleSample = -1;
-
-class SampleCountPickleIterator : public SampleCountIterator {
- public:
-  explicit SampleCountPickleIterator(PickleIterator* iter);
-
-  bool Done() const override;
-  void Next() override;
-  void Get(HistogramBase::Sample* min,
-           int64_t* max,
-           HistogramBase::Count* count) const override;
-
- private:
-  PickleIterator* const iter_;
-
-  HistogramBase::Sample min_;
-  int64_t max_;
-  HistogramBase::Count count_;
-  bool is_done_;
-};
-
-SampleCountPickleIterator::SampleCountPickleIterator(PickleIterator* iter)
-    : iter_(iter),
-      is_done_(false) {
-  Next();
-}
-
-bool SampleCountPickleIterator::Done() const {
-  return is_done_;
-}
-
-void SampleCountPickleIterator::Next() {
-  DCHECK(!Done());
-  if (!iter_->ReadInt(&min_) || !iter_->ReadInt64(&max_) ||
-      !iter_->ReadInt(&count_)) {
-    is_done_ = true;
-  }
-}
-
-void SampleCountPickleIterator::Get(HistogramBase::Sample* min,
-                                    int64_t* max,
-                                    HistogramBase::Count* count) const {
-  DCHECK(!Done());
-  *min = min_;
-  *max = max_;
-  *count = count_;
-}
-
-}  // namespace
-
-static_assert(sizeof(HistogramSamples::AtomicSingleSample) ==
-                  sizeof(subtle::Atomic32),
-              "AtomicSingleSample isn't 32 bits");
-
-HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Load()
-    const {
-  AtomicSingleSample single_sample = subtle::Acquire_Load(&as_atomic);
-
-  // If the sample was extracted/disabled, it's still zero to the outside.
-  if (single_sample.as_atomic == kDisabledSingleSample)
-    single_sample.as_atomic = 0;
-
-  return single_sample.as_parts;
-}
-
-HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Extract(
-    bool disable) {
-  AtomicSingleSample single_sample = subtle::NoBarrier_AtomicExchange(
-      &as_atomic, disable ? kDisabledSingleSample : 0);
-  if (single_sample.as_atomic == kDisabledSingleSample)
-    single_sample.as_atomic = 0;
-  return single_sample.as_parts;
-}
-
-bool HistogramSamples::AtomicSingleSample::Accumulate(
-    size_t bucket,
-    HistogramBase::Count count) {
-  if (count == 0)
-    return true;
-
-  // Convert the parameters to 16-bit variables because it's all 16-bit below.
-  // To support decrements/subtractions, divide the |count| into sign/value and
-  // do the proper operation below. The alternative is to change the single-
-  // sample's count to be a signed integer (int16_t) and just add an int16_t
-  // |count16| but that is somewhat wasteful given that the single-sample is
-  // never expected to have a count less than zero.
-  if (count < -std::numeric_limits<uint16_t>::max() ||
-      count > std::numeric_limits<uint16_t>::max() ||
-      bucket > std::numeric_limits<uint16_t>::max()) {
-    return false;
-  }
-  bool count_is_negative = count < 0;
-  uint16_t count16 = static_cast<uint16_t>(count_is_negative ? -count : count);
-  uint16_t bucket16 = static_cast<uint16_t>(bucket);
-
-  // A local, unshared copy of the single-sample is necessary so the parts
-  // can be manipulated without worrying about atomicity.
-  AtomicSingleSample single_sample;
-
-  bool sample_updated;
-  do {
-    subtle::Atomic32 original = subtle::Acquire_Load(&as_atomic);
-    if (original == kDisabledSingleSample)
-      return false;
-    single_sample.as_atomic = original;
-    if (single_sample.as_atomic != 0) {
-      // Only the same bucket (parameter and stored) can be counted multiple
-      // times.
-      if (single_sample.as_parts.bucket != bucket16)
-        return false;
-    } else {
-      // The |single_sample| was zero so it becomes the |bucket| parameter,
-      // the contents of which were checked above to fit in 16 bits.
-      single_sample.as_parts.bucket = bucket16;
-    }
-
-    // Update count, making sure that it doesn't overflow.
-    CheckedNumeric<uint16_t> new_count(single_sample.as_parts.count);
-    if (count_is_negative)
-      new_count -= count16;
-    else
-      new_count += count16;
-    if (!new_count.AssignIfValid(&single_sample.as_parts.count))
-      return false;
-
-    // Don't let this become equivalent to the "disabled" value.
-    if (single_sample.as_atomic == kDisabledSingleSample)
-      return false;
-
-    // Store the updated single-sample back into memory. |existing| is what
-    // was in that memory location at the time of the call; if it doesn't
-    // match |original| then the swap didn't happen so loop again.
-    subtle::Atomic32 existing = subtle::Release_CompareAndSwap(
-        &as_atomic, original, single_sample.as_atomic);
-    sample_updated = (existing == original);
-  } while (!sample_updated);
-
-  return true;
-}
-
-bool HistogramSamples::AtomicSingleSample::IsDisabled() const {
-  return subtle::Acquire_Load(&as_atomic) == kDisabledSingleSample;
-}
-
-HistogramSamples::LocalMetadata::LocalMetadata() {
-  // This is the same way it's done for persistent metadata since no ctor
-  // is called for the data members in that case.
-  memset(this, 0, sizeof(*this));
-}
-
-HistogramSamples::HistogramSamples(uint64_t id, Metadata* meta)
-    : meta_(meta) {
-  DCHECK(meta_->id == 0 || meta_->id == id);
-
-  // It's possible that |meta| is contained in initialized, read-only memory
-  // so it's essential that no write be done in that case.
-  if (!meta_->id)
-    meta_->id = id;
-}
-
-// This mustn't do anything with |meta_|. It was passed to the ctor and may
-// be invalid by the time this dtor gets called.
-HistogramSamples::~HistogramSamples() = default;
-
-void HistogramSamples::Add(const HistogramSamples& other) {
-  IncreaseSumAndCount(other.sum(), other.redundant_count());
-  std::unique_ptr<SampleCountIterator> it = other.Iterator();
-  bool success = AddSubtractImpl(it.get(), ADD);
-  DCHECK(success);
-}
-
-bool HistogramSamples::AddFromPickle(PickleIterator* iter) {
-  int64_t sum;
-  HistogramBase::Count redundant_count;
-
-  if (!iter->ReadInt64(&sum) || !iter->ReadInt(&redundant_count))
-    return false;
-
-  IncreaseSumAndCount(sum, redundant_count);
-
-  SampleCountPickleIterator pickle_iter(iter);
-  return AddSubtractImpl(&pickle_iter, ADD);
-}
-
-void HistogramSamples::Subtract(const HistogramSamples& other) {
-  IncreaseSumAndCount(-other.sum(), -other.redundant_count());
-  std::unique_ptr<SampleCountIterator> it = other.Iterator();
-  bool success = AddSubtractImpl(it.get(), SUBTRACT);
-  DCHECK(success);
-}
-
-void HistogramSamples::Serialize(Pickle* pickle) const {
-  pickle->WriteInt64(sum());
-  pickle->WriteInt(redundant_count());
-
-  HistogramBase::Sample min;
-  int64_t max;
-  HistogramBase::Count count;
-  for (std::unique_ptr<SampleCountIterator> it = Iterator(); !it->Done();
-       it->Next()) {
-    it->Get(&min, &max, &count);
-    pickle->WriteInt(min);
-    pickle->WriteInt64(max);
-    pickle->WriteInt(count);
-  }
-}
-
-bool HistogramSamples::AccumulateSingleSample(HistogramBase::Sample value,
-                                              HistogramBase::Count count,
-                                              size_t bucket) {
-  if (single_sample().Accumulate(bucket, count)) {
-    // Success. Update the (separate) sum and redundant-count.
-    IncreaseSumAndCount(strict_cast<int64_t>(value) * count, count);
-    return true;
-  }
-  return false;
-}
-
-void HistogramSamples::IncreaseSumAndCount(int64_t sum,
-                                           HistogramBase::Count count) {
-#ifdef ARCH_CPU_64_BITS
-  subtle::NoBarrier_AtomicIncrement(&meta_->sum, sum);
-#else
-  meta_->sum += sum;
-#endif
-  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count, count);
-}
-
-void HistogramSamples::RecordNegativeSample(NegativeSampleReason reason,
-                                            HistogramBase::Count increment) {
-  UMA_HISTOGRAM_ENUMERATION("UMA.NegativeSamples.Reason", reason,
-                            MAX_NEGATIVE_SAMPLE_REASONS);
-  UMA_HISTOGRAM_CUSTOM_COUNTS("UMA.NegativeSamples.Increment", increment, 1,
-                              1 << 30, 100);
-  UmaHistogramSparse("UMA.NegativeSamples.Histogram",
-                     static_cast<int32_t>(id()));
-}
-
-SampleCountIterator::~SampleCountIterator() = default;
-
-bool SampleCountIterator::GetBucketIndex(size_t* index) const {
-  DCHECK(!Done());
-  return false;
-}
-
-SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
-                                           int64_t max,
-                                           HistogramBase::Count count)
-    : SingleSampleIterator(min, max, count, kSizeMax) {}
-
-SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
-                                           int64_t max,
-                                           HistogramBase::Count count,
-                                           size_t bucket_index)
-    : min_(min), max_(max), bucket_index_(bucket_index), count_(count) {}
-
-SingleSampleIterator::~SingleSampleIterator() = default;
-
-bool SingleSampleIterator::Done() const {
-  return count_ == 0;
-}
-
-void SingleSampleIterator::Next() {
-  DCHECK(!Done());
-  count_ = 0;
-}
-
-void SingleSampleIterator::Get(HistogramBase::Sample* min,
-                               int64_t* max,
-                               HistogramBase::Count* count) const {
-  DCHECK(!Done());
-  if (min != nullptr)
-    *min = min_;
-  if (max != nullptr)
-    *max = max_;
-  if (count != nullptr)
-    *count = count_;
-}
-
-bool SingleSampleIterator::GetBucketIndex(size_t* index) const {
-  DCHECK(!Done());
-  if (bucket_index_ == kSizeMax)
-    return false;
-  *index = bucket_index_;
-  return true;
-}
-
-}  // namespace base
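A self-contained sketch of the lock-free loop in AtomicSingleSample::Accumulate() above, using std::atomic<uint32_t> in place of base::subtle. The 16/16-bit packing and the all-ones "disabled" value follow the code above, though the real union's field layout is platform-dependent:

#include <atomic>
#include <cstdint>

constexpr uint32_t kDisabled = 0xFFFFFFFFu;  // Mirrors kDisabledSingleSample.

bool Accumulate(std::atomic<uint32_t>& slot, uint16_t bucket, uint16_t add) {
  if (add == 0)
    return true;  // Nothing to do, as in the original.
  uint32_t old = slot.load(std::memory_order_acquire);
  for (;;) {
    if (old == kDisabled)
      return false;  // Extracted/disabled: no further accumulation.
    uint16_t cur_bucket = static_cast<uint16_t>(old >> 16);
    uint16_t cur_count = static_cast<uint16_t>(old & 0xFFFFu);
    if (old != 0 && cur_bucket != bucket)
      return false;  // Only one bucket can be held at a time.
    uint32_t new_count = uint32_t{cur_count} + add;
    if (new_count > 0xFFFFu)
      return false;  // Count would overflow its 16 bits.
    uint32_t updated = (uint32_t{bucket} << 16) | new_count;
    if (updated == kDisabled)
      return false;  // Must not collide with the "disabled" encoding.
    if (slot.compare_exchange_weak(old, updated, std::memory_order_release,
                                   std::memory_order_acquire))
      return true;
    // CAS failure reloaded |old|; retry with the fresh value.
  }
}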
diff --git a/base/metrics/histogram_samples.h b/base/metrics/histogram_samples.h
deleted file mode 100644
index 059fd3c..0000000
--- a/base/metrics/histogram_samples.h
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_SAMPLES_H_
-#define BASE_METRICS_HISTOGRAM_SAMPLES_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <limits>
-#include <memory>
-
-#include "base/atomicops.h"
-#include "base/macros.h"
-#include "base/metrics/histogram_base.h"
-
-namespace base {
-
-class Pickle;
-class PickleIterator;
-class SampleCountIterator;
-
-// HistogramSamples is a container storing all samples of a histogram. All
-// elements must be of a fixed width to ensure 32/64-bit interoperability.
-// If this structure changes, bump the version number for kTypeIdHistogram
-// in persistent_histogram_allocator.cc.
-//
-// Note that though these samples are individually consistent (through the use
-// of atomic operations on the counts), there is only "eventual consistency"
-// overall when multiple threads are accessing this data. That means that the
-// sum, redundant-count, etc. could be momentarily out-of-sync with the stored
-// counts but will settle to a consistent "steady state" once all threads have
-// exited this code.
-class BASE_EXPORT HistogramSamples {
- public:
-  // A single bucket and count. To fit within a single atomic on 32-bit build
-  // architectures, both |bucket| and |count| are limited in size to 16 bits.
-  // This limits the functionality somewhat but if an entry can't fit then
-  // the full array of samples can be allocated and used.
-  struct SingleSample {
-    uint16_t bucket;
-    uint16_t count;
-  };
-
-  // A structure for managing an atomic single sample. Because this is generally
-  // used in association with other atomic values, the defined methods use
-  // acquire/release operations to guarantee ordering with outside values.
-  union BASE_EXPORT AtomicSingleSample {
-    AtomicSingleSample() : as_atomic(0) {}
-    AtomicSingleSample(subtle::Atomic32 rhs) : as_atomic(rhs) {}
-
-    // Returns the single sample in an atomic manner. This is an "acquire"
-    // load. The returned sample isn't shared and thus its fields can be safely
-    // accessed.
-    SingleSample Load() const;
-
-    // Extracts the single sample in an atomic manner. If |disable| is true
-    // then this object will be set so it will never accumulate another value.
-    // This is "no barrier" so doesn't enforce ordering with other atomic ops.
-    SingleSample Extract(bool disable);
-
-    // Adds a given count to the held bucket. If not possible, it returns false
-    // and leaves the parts unchanged. Once extracted/disabled, this always
-    // returns false. This is an "acquire/release" operation.
-    bool Accumulate(size_t bucket, HistogramBase::Count count);
-
-    // Returns whether the sample has been "disabled" (via Extract) and thus not
-    // allowed to accept further accumulation.
-    bool IsDisabled() const;
-
-   private:
-    // union field: The actual sample bucket and count.
-    SingleSample as_parts;
-
-    // union field: The sample as an atomic value. Atomic64 would provide
-    // more flexibility but isn't available on all builds. This can hold a
-    // special, internal "disabled" value indicating that it must not accept
-    // further accumulation.
-    subtle::Atomic32 as_atomic;
-  };
-
-  // A structure of information about the data, common to all sample containers.
-  // Because of how this is used in persistent memory, it must be a POD object
-  // that makes sense when initialized to all zeros.
-  struct Metadata {
-    // Expected size for 32/64-bit check.
-    static constexpr size_t kExpectedInstanceSize = 24;
-
-    // Initialized when the sample-set is first created with a value provided
-    // by the caller. It is generally used to identify the sample-set across
-    // threads and processes, though not necessarily uniquely as it is possible
-    // to have multiple sample-sets representing subsets of the data.
-    uint64_t id;
-
-    // The sum of all the entries, effectively the sum(sample * count) for
-    // all samples. Despite being atomic, no guarantees are made on the
-    // accuracy of this value; there may be races during histogram
-    // accumulation and snapshotting that we choose to accept. It should
-    // be treated as approximate.
-#ifdef ARCH_CPU_64_BITS
-    subtle::Atomic64 sum;
-#else
-    // 32-bit systems don't have atomic 64-bit operations. Use a basic type
-    // and don't worry about "shearing".
-    int64_t sum;
-#endif
-
-    // A "redundant" count helps identify memory corruption. It redundantly
-    // stores the total number of samples accumulated in the histogram. We
-    // can compare this count to the sum of the counts (TotalCount() function),
-    // and detect problems. Note, depending on the implementation of different
-    // histogram types, there might be races during histogram accumulation
-    // and snapshotting that we choose to accept. In this case, the tallies
-    // might mismatch even when no memory corruption has happened.
-    HistogramBase::AtomicCount redundant_count;
-
-    // A single histogram value and associated count. This allows histograms
-    // that typically report only a single value to not require full storage
-    // to be allocated.
-    AtomicSingleSample single_sample;  // 32 bits
-  };
-
-  // Because structures held in persistent memory must be POD, there can be no
-  // default constructor to clear the fields. This derived class exists just
-  // to clear them when being allocated on the heap.
-  struct BASE_EXPORT LocalMetadata : Metadata {
-    LocalMetadata();
-  };
-
-  HistogramSamples(uint64_t id, Metadata* meta);
-  virtual ~HistogramSamples();
-
-  virtual void Accumulate(HistogramBase::Sample value,
-                          HistogramBase::Count count) = 0;
-  virtual HistogramBase::Count GetCount(HistogramBase::Sample value) const = 0;
-  virtual HistogramBase::Count TotalCount() const = 0;
-
-  virtual void Add(const HistogramSamples& other);
-
-  // Add from serialized samples.
-  virtual bool AddFromPickle(PickleIterator* iter);
-
-  virtual void Subtract(const HistogramSamples& other);
-
-  virtual std::unique_ptr<SampleCountIterator> Iterator() const = 0;
-  virtual void Serialize(Pickle* pickle) const;
-
-  // Accessor functions.
-  uint64_t id() const { return meta_->id; }
-  int64_t sum() const {
-#ifdef ARCH_CPU_64_BITS
-    return subtle::NoBarrier_Load(&meta_->sum);
-#else
-    return meta_->sum;
-#endif
-  }
-  HistogramBase::Count redundant_count() const {
-    return subtle::NoBarrier_Load(&meta_->redundant_count);
-  }
-
-  // Temporarily visible for crash debugging. Should be protected.
-  // TODO(bcwhite): Move this back where it belongs.
-  // https://bugs.chromium.org/p/chromium/issues/detail?id=836875
-  Metadata* meta() { return meta_; }
-
- protected:
-  enum NegativeSampleReason {
-    SAMPLES_HAVE_LOGGED_BUT_NOT_SAMPLE,
-    SAMPLES_SAMPLE_LESS_THAN_LOGGED,
-    SAMPLES_ADDED_NEGATIVE_COUNT,
-    SAMPLES_ADD_WENT_NEGATIVE,
-    SAMPLES_ADD_OVERFLOW,
-    SAMPLES_ACCUMULATE_NEGATIVE_COUNT,
-    SAMPLES_ACCUMULATE_WENT_NEGATIVE,
-    DEPRECATED_SAMPLES_ACCUMULATE_OVERFLOW,
-    SAMPLES_ACCUMULATE_OVERFLOW,
-    MAX_NEGATIVE_SAMPLE_REASONS
-  };
-
-  // Based on |op| type, add or subtract sample counts data from the iterator.
-  enum Operator { ADD, SUBTRACT };
-  virtual bool AddSubtractImpl(SampleCountIterator* iter, Operator op) = 0;
-
-  // Accumulates to the embedded single-sample field if possible. Returns true
-  // on success, false otherwise. Sum and redundant-count are also updated in
-  // the success case.
-  bool AccumulateSingleSample(HistogramBase::Sample value,
-                              HistogramBase::Count count,
-                              size_t bucket);
-
-  // Atomically adjust the sum and redundant-count.
-  void IncreaseSumAndCount(int64_t sum, HistogramBase::Count count);
-
-  // Record a negative-sample observation and the reason why.
-  void RecordNegativeSample(NegativeSampleReason reason,
-                            HistogramBase::Count increment);
-
-  AtomicSingleSample& single_sample() { return meta_->single_sample; }
-  const AtomicSingleSample& single_sample() const {
-    return meta_->single_sample;
-  }
-
- private:
-  // Depending on the derived class, meta values can come from local storage
-  // or external storage, in which case the HistogramSamples class cannot
-  // take ownership of the Metadata*.
-  Metadata* meta_;
-
-  DISALLOW_COPY_AND_ASSIGN(HistogramSamples);
-};
-
-class BASE_EXPORT SampleCountIterator {
- public:
-  virtual ~SampleCountIterator();
-
-  virtual bool Done() const = 0;
-  virtual void Next() = 0;
-
-  // Get the sample and count at current position.
-  // |min|, |max| and |count| can be NULL if the value is not of interest.
-  // Note: |max| is int64_t because histograms support logged values in the
-  // full int32_t range and bucket max is exclusive, so it needs to support
-  // values up to MAXINT32+1.
-  // Requires: !Done();
-  virtual void Get(HistogramBase::Sample* min,
-                   int64_t* max,
-                   HistogramBase::Count* count) const = 0;
-  static_assert(std::numeric_limits<HistogramBase::Sample>::max() <
-                    std::numeric_limits<int64_t>::max(),
-                "Get() |max| must be able to hold Histogram::Sample max + 1");
-
-  // Get the index of current histogram bucket.
-  // For histograms that don't use predefined buckets, it returns false.
-  // Requires: !Done();
-  virtual bool GetBucketIndex(size_t* index) const;
-};
-
-class BASE_EXPORT SingleSampleIterator : public SampleCountIterator {
- public:
-  SingleSampleIterator(HistogramBase::Sample min,
-                       int64_t max,
-                       HistogramBase::Count count);
-  SingleSampleIterator(HistogramBase::Sample min,
-                       int64_t max,
-                       HistogramBase::Count count,
-                       size_t bucket_index);
-  ~SingleSampleIterator() override;
-
-  // SampleCountIterator:
-  bool Done() const override;
-  void Next() override;
-  void Get(HistogramBase::Sample* min,
-           int64_t* max,
-           HistogramBase::Count* count) const override;
-
-  // SampleVector uses predefined buckets so iterator can return bucket index.
-  bool GetBucketIndex(size_t* index) const override;
-
- private:
-  // Information about the single value to return.
-  const HistogramBase::Sample min_;
-  const int64_t max_;
-  const size_t bucket_index_;
-  HistogramBase::Count count_;
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_HISTOGRAM_SAMPLES_H_
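A short sketch of consuming the SampleCountIterator interface declared above; it assumes this header is available, and TotalFromIterator is an invented helper name:

#include <cstdint>
#include "base/metrics/histogram_samples.h"

int64_t TotalFromIterator(base::SampleCountIterator* it) {
  int64_t total = 0;
  for (; !it->Done(); it->Next()) {
    base::HistogramBase::Sample min;
    int64_t max;
    base::HistogramBase::Count count;
    it->Get(&min, &max, &count);  // Bucket [min, max) holds |count| samples.
    total += count;
  }
  return total;  // Comparable to redundant_count() absent races/corruption.
}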
diff --git a/base/metrics/histogram_snapshot_manager.cc b/base/metrics/histogram_snapshot_manager.cc
deleted file mode 100644
index c1b804e..0000000
--- a/base/metrics/histogram_snapshot_manager.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/histogram_snapshot_manager.h"
-
-#include <memory>
-
-#include "base/debug/alias.h"
-#include "base/metrics/histogram_flattener.h"
-#include "base/metrics/histogram_samples.h"
-#include "base/metrics/statistics_recorder.h"
-#include "base/stl_util.h"
-
-namespace base {
-
-namespace {
-
-// A simple object to set an "active" flag and clear it upon destruction. It is
-// an error if the flag is already set.
-class MakeActive {
- public:
-  MakeActive(std::atomic<bool>* is_active) : is_active_(is_active) {
-    bool was_active = is_active_->exchange(true, std::memory_order_relaxed);
-    CHECK(!was_active);
-  }
-  ~MakeActive() { is_active_->store(false, std::memory_order_relaxed); }
-
- private:
-  std::atomic<bool>* is_active_;
-
-  DISALLOW_COPY_AND_ASSIGN(MakeActive);
-};
-
-}  // namespace
-
-HistogramSnapshotManager::HistogramSnapshotManager(
-    HistogramFlattener* histogram_flattener)
-    : histogram_flattener_(histogram_flattener) {
-  DCHECK(histogram_flattener_);
-  is_active_.store(false, std::memory_order_relaxed);
-}
-
-HistogramSnapshotManager::~HistogramSnapshotManager() = default;
-
-void HistogramSnapshotManager::PrepareDeltas(
-    const std::vector<HistogramBase*>& histograms,
-    HistogramBase::Flags flags_to_set,
-    HistogramBase::Flags required_flags) {
-  for (HistogramBase* const histogram : histograms) {
-    histogram->SetFlags(flags_to_set);
-    if ((histogram->flags() & required_flags) == required_flags)
-      PrepareDelta(histogram);
-  }
-}
-
-void HistogramSnapshotManager::PrepareDelta(HistogramBase* histogram) {
-  histogram->ValidateHistogramContents();
-  PrepareSamples(histogram, histogram->SnapshotDelta());
-}
-
-void HistogramSnapshotManager::PrepareFinalDelta(
-    const HistogramBase* histogram) {
-  histogram->ValidateHistogramContents();
-  PrepareSamples(histogram, histogram->SnapshotFinalDelta());
-}
-
-void HistogramSnapshotManager::PrepareSamples(
-    const HistogramBase* histogram,
-    std::unique_ptr<HistogramSamples> samples) {
-  DCHECK(histogram_flattener_);
-
-  // Ensure that there is no concurrent access going on while accessing the
-  // set of known histograms. The flag will be reset when this object goes
-  // out of scope.
-  MakeActive make_active(&is_active_);
-
-  // Get information known about this histogram. If it did not previously
-  // exist, one will be created and initialized.
-  SampleInfo* sample_info = &known_histograms_[histogram->name_hash()];
-
-  // Crash if we detect that our histograms have been overwritten.  This may be
-  // a fair distance from the memory smasher, but we hope to correlate these
-  // crashes with other events, such as plugins, or usage patterns, etc.
-  uint32_t corruption = histogram->FindCorruption(*samples);
-  if (HistogramBase::BUCKET_ORDER_ERROR & corruption) {
-    // Extract fields useful during debug.
-    const BucketRanges* ranges =
-        static_cast<const Histogram*>(histogram)->bucket_ranges();
-    uint32_t ranges_checksum = ranges->checksum();
-    uint32_t ranges_calc_checksum = ranges->CalculateChecksum();
-    int32_t flags = histogram->flags();
-    // The checksum should have caught this, so crash separately if it didn't.
-    CHECK_NE(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
-    CHECK(false);  // Crash for the bucket order corruption.
-    // Ensure that compiler keeps around pointers to |histogram| and its
-    // internal |bucket_ranges_| for any minidumps.
-    base::debug::Alias(&ranges_checksum);
-    base::debug::Alias(&ranges_calc_checksum);
-    base::debug::Alias(&flags);
-  }
-  // Checksum corruption might not have caused order corruption.
-  CHECK_EQ(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
-
-  // Note, at this point corruption can only be COUNT_HIGH_ERROR or
-  // COUNT_LOW_ERROR and they never arise together, so we don't need to extract
-  // bits from corruption.
-  if (corruption) {
-    DLOG(ERROR) << "Histogram: \"" << histogram->histogram_name()
-                << "\" has data corruption: " << corruption;
-    // Don't record corrupt data to metrics services.
-    const uint32_t old_corruption = sample_info->inconsistencies;
-    if (old_corruption == (corruption | old_corruption))
-      return;  // We've already seen this corruption for this histogram.
-    sample_info->inconsistencies |= corruption;
-    return;
-  }
-
-  if (samples->TotalCount() > 0)
-    histogram_flattener_->RecordDelta(*histogram, *samples);
-}
-
-}  // namespace base
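Conceptually, PrepareDelta() leans on each histogram's SnapshotDelta(), which reports only what accumulated since the previous snapshot so repeated uploads never double-count. A toy model, with plain integers standing in for HistogramSamples:

#include <cstdint>

struct ToyRecorder {
  int64_t current = 0;  // Live accumulation.
  int64_t logged = 0;   // Already reported in earlier snapshots.

  void Add(int64_t count) { current += count; }

  // Analogue of SnapshotDelta(): report the marginal change and remember it
  // so the next snapshot starts from here.
  int64_t SnapshotDelta() {
    int64_t delta = current - logged;
    logged = current;
    return delta;
  }
};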
diff --git a/base/metrics/histogram_snapshot_manager.h b/base/metrics/histogram_snapshot_manager.h
deleted file mode 100644
index cf7c149..0000000
--- a/base/metrics/histogram_snapshot_manager.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
-#define BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
-
-#include <stdint.h>
-
-#include <atomic>
-#include <map>
-#include <string>
-#include <vector>
-
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/metrics/histogram_base.h"
-
-namespace base {
-
-class HistogramSamples;
-class HistogramFlattener;
-
-// HistogramSnapshotManager handles the logistics of gathering up available
-// histograms for recording either to disk or for transmission (such as from
-// renderer to browser, or from browser to UMA upload). Since histograms can sit
-// in memory for an extended period of time, and are vulnerable to memory
-// corruption, this class also validates as much redundancy as it can before
-// calling for the marginal change (a.k.a., delta) in a histogram to be
-// recorded.
-class BASE_EXPORT HistogramSnapshotManager final {
- public:
-  explicit HistogramSnapshotManager(HistogramFlattener* histogram_flattener);
-  ~HistogramSnapshotManager();
-
-  // Snapshot all histograms, and ask |histogram_flattener_| to record the
-  // delta. |flags_to_set| is used to set flags for each histogram.
-  // |required_flags| is used to select histograms to be recorded.
-  // Only histograms that have all the flags specified by the argument will be
-  // chosen. If all histograms should be recorded, set it to
-  // |Histogram::kNoFlags|.
-  void PrepareDeltas(const std::vector<HistogramBase*>& histograms,
-                     HistogramBase::Flags flags_to_set,
-                     HistogramBase::Flags required_flags);
-
-  // When the collection is too complex to be done using a single iterator,
-  // the steps can be performed separately. Call PrepareDelta() as many
-  // times as necessary. PrepareFinalDelta() works like PrepareDelta()
-  // except that it does not update the previous logged values and can thus
-  // be used with read-only files.
-  void PrepareDelta(HistogramBase* histogram);
-  void PrepareFinalDelta(const HistogramBase* histogram);
-
- private:
-  FRIEND_TEST_ALL_PREFIXES(HistogramSnapshotManagerTest, CheckMerge);
-
-  // During a snapshot, samples are acquired and aggregated. This structure
-  // contains all the information for a given histogram that persists between
-  // collections.
-  struct SampleInfo {
-    // The set of inconsistencies (flags) already seen for the histogram.
-    // See HistogramBase::Inconsistency for values.
-    uint32_t inconsistencies = 0;
-  };
-
-  // Capture and hold samples from a histogram. This does all the heavy
-  // lifting for PrepareDelta() and PrepareFinalDelta().
-  void PrepareSamples(const HistogramBase* histogram,
-                      std::unique_ptr<HistogramSamples> samples);
-
-  // |histogram_flattener_| handles the logistics of recording the histogram
-  // deltas.
-  HistogramFlattener* const histogram_flattener_;  // Weak.
-
-  // For histograms, track what has been previously seen, indexed
-  // by the hash of the histogram name.
-  std::map<uint64_t, SampleInfo> known_histograms_;
-
-  // A flag indicating if a thread is currently doing an operation. This is
-  // used to check against concurrent access which is not supported. A Thread-
-  // Checker is not sufficient because it may be guarded by an outside lock
-  // (as is the case with cronet).
-  std::atomic<bool> is_active_;
-
-  DISALLOW_COPY_AND_ASSIGN(HistogramSnapshotManager);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
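The flag selection rule documented for PrepareDeltas() reduces to a mask test; a one-function sketch (ShouldRecord is an invented name):

#include <cstdint>

// A histogram is recorded only if it carries every flag in |required_flags|;
// required_flags == 0 (kNoFlags) therefore selects all histograms.
bool ShouldRecord(int32_t histogram_flags, int32_t required_flags) {
  return (histogram_flags & required_flags) == required_flags;
}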
diff --git a/base/metrics/metrics_hashes.cc b/base/metrics/metrics_hashes.cc
deleted file mode 100644
index 5672b06..0000000
--- a/base/metrics/metrics_hashes.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/metrics_hashes.h"
-
-#include "base/logging.h"
-#include "base/md5.h"
-#include "base/sys_byteorder.h"
-
-namespace base {
-
-namespace {
-
-// Converts the 8-byte prefix of an MD5 hash into a uint64_t value.
-inline uint64_t DigestToUInt64(const base::MD5Digest& digest) {
-  uint64_t value;
-  DCHECK_GE(sizeof(digest.a), sizeof(value));
-  memcpy(&value, digest.a, sizeof(value));
-  return base::NetToHost64(value);
-}
-
-}  // namespace
-
-uint64_t HashMetricName(base::StringPiece name) {
-  base::MD5Digest digest;
-  base::MD5Sum(name.data(), name.size(), &digest);
-  return DigestToUInt64(digest);
-}
-
-}  // namespace base
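The hashing step above reduces to "take the first 8 bytes of the MD5 digest as a big-endian integer". A dependency-free sketch of that conversion (First8BytesBigEndian is an invented name):

#include <cstdint>

uint64_t First8BytesBigEndian(const unsigned char digest[16]) {
  uint64_t value = 0;
  for (int i = 0; i < 8; ++i)
    value = (value << 8) | digest[i];  // Most significant byte first.
  return value;
}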
diff --git a/base/metrics/metrics_hashes.h b/base/metrics/metrics_hashes.h
deleted file mode 100644
index d05c4ba..0000000
--- a/base/metrics/metrics_hashes.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_METRICS_HASHES_H_
-#define BASE_METRICS_METRICS_HASHES_H_
-
-#include <stdint.h>
-
-#include "base/base_export.h"
-#include "base/strings/string_piece.h"
-
-namespace base {
-
-// Computes a uint64_t hash of a given string based on its MD5 hash. Suitable
-// for metric names.
-BASE_EXPORT uint64_t HashMetricName(base::StringPiece name);
-
-}  // namespace base
-
-#endif  // BASE_METRICS_METRICS_HASHES_H_
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
deleted file mode 100644
index bfbb44b..0000000
--- a/base/metrics/persistent_histogram_allocator.cc
+++ /dev/null
@@ -1,1024 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/persistent_histogram_allocator.h"
-
-#include <memory>
-
-#include "base/atomicops.h"
-#include "base/files/file_path.h"
-#include "base/files/file_util.h"
-#include "base/files/important_file_writer.h"
-#include "base/files/memory_mapped_file.h"
-#include "base/lazy_instance.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/metrics/histogram.h"
-#include "base/metrics/histogram_base.h"
-#include "base/metrics/histogram_samples.h"
-#include "base/metrics/metrics_hashes.h"
-#include "base/metrics/persistent_sample_map.h"
-#include "base/metrics/sparse_histogram.h"
-#include "base/metrics/statistics_recorder.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/pickle.h"
-#include "base/process/process_handle.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_split.h"
-#include "base/strings/stringprintf.h"
-#include "base/synchronization/lock.h"
-
-namespace base {
-
-namespace {
-
-// Type identifiers used when storing in persistent memory so they can be
-// identified during extraction; the first 4 bytes of the SHA1 of the name
-// are used as a unique integer. A "version number" is added to the base
-// so that, if the structure of that object changes, stored older versions
-// will be safely ignored.
-enum : uint32_t {
-  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
-  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
-};
-
-// The current globally-active persistent allocator for all new histograms.
-// The object held here will obviously not be destructed at process exit
-// but that's best since PersistentMemoryAllocator objects (that underlie
-// GlobalHistogramAllocator objects) are explicitly forbidden from doing
-// anything essential at exit anyway due to the fact that they depend on data
-// managed elsewhere and which could be destructed first. An AtomicWord is
-// used instead of std::atomic because the latter can create global ctors
-// and dtors.
-subtle::AtomicWord g_histogram_allocator = 0;
-
-// Take an array of range boundaries and create a proper BucketRanges object
-// which is returned to the caller. A return of nullptr indicates that the
-// passed boundaries are invalid.
-std::unique_ptr<BucketRanges> CreateRangesFromData(
-    HistogramBase::Sample* ranges_data,
-    uint32_t ranges_checksum,
-    size_t count) {
-  // To avoid racy destruction at shutdown, the following may be leaked.
-  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
-  DCHECK_EQ(count, ranges->size());
-  for (size_t i = 0; i < count; ++i) {
-    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
-      return nullptr;
-    ranges->set_range(i, ranges_data[i]);
-  }
-
-  ranges->ResetChecksum();
-  if (ranges->checksum() != ranges_checksum)
-    return nullptr;
-
-  return ranges;
-}
-
-// Calculate the number of bytes required to store all of a histogram's
-// "counts". This will return zero (0) if |bucket_count| is not valid.
-size_t CalculateRequiredCountsBytes(size_t bucket_count) {
-  // 2 because each "sample count" also requires a backup "logged count"
-  // used for calculating the delta during snapshot operations.
-  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
-
-  // If the |bucket_count| is such that it would overflow the return type,
-  // perhaps as the result of a malicious actor, then return zero to
-  // indicate the problem to the caller.
-  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
-    return 0;
-
-  return bucket_count * kBytesPerBucket;
-}
-
-}  // namespace
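The guard in CalculateRequiredCountsBytes() is the standard pre-multiplication overflow check; a generic sketch under the same idea (CheckedArrayBytes is an invented name):

#include <cstddef>
#include <limits>

// Returns elements * bytes_per_element, or 0 if the product would overflow
// size_t (a zero element size is also rejected) -- e.g. when a hostile
// |bucket_count| arrives via shared memory.
size_t CheckedArrayBytes(size_t elements, size_t bytes_per_element) {
  if (bytes_per_element == 0 ||
      elements > std::numeric_limits<size_t>::max() / bytes_per_element)
    return 0;
  return elements * bytes_per_element;
}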
-
-const Feature kPersistentHistogramsFeature{
-  "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
-};
-
-
-PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
-    PersistentMemoryAllocator* allocator)
-    : allocator_(allocator), record_iterator_(allocator) {}
-
-PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
-    default;
-
-PersistentSampleMapRecords*
-PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
-                                                          const void* user) {
-  base::AutoLock auto_lock(lock_);
-  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
-}
-
-PersistentSampleMapRecords*
-PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
-    uint64_t id) {
-  lock_.AssertAcquired();
-
-  auto found = sample_records_.find(id);
-  if (found != sample_records_.end())
-    return found->second.get();
-
-  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
-  samples = std::make_unique<PersistentSampleMapRecords>(this, id);
-  return samples.get();
-}
-
-bool PersistentSparseHistogramDataManager::LoadRecords(
-    PersistentSampleMapRecords* sample_map_records) {
-  // DataManager must be locked in order to access the found_ field of any
-  // PersistentSampleMapRecords object.
-  base::AutoLock auto_lock(lock_);
-  bool found = false;
-
-  // If there are already "found" entries for the passed object, move them.
-  if (!sample_map_records->found_.empty()) {
-    sample_map_records->records_.reserve(sample_map_records->records_.size() +
-                                         sample_map_records->found_.size());
-    sample_map_records->records_.insert(sample_map_records->records_.end(),
-                                        sample_map_records->found_.begin(),
-                                        sample_map_records->found_.end());
-    sample_map_records->found_.clear();
-    found = true;
-  }
-
-  // Acquiring a lock is a semi-expensive operation so load some records with
-  // each call. More than this number may be loaded if it takes longer to
-  // find at least one matching record for the passed object.
-  const int kMinimumNumberToLoad = 10;
-  const uint64_t match_id = sample_map_records->sample_map_id_;
-
-  // Loop while no entry is found OR we haven't yet loaded the minimum number.
-  // This will continue reading even after a match is found.
-  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
-    // Get the next sample-record. The iterator will always resume from where
-    // it left off even if it previously had nothing further to return.
-    uint64_t found_id;
-    PersistentMemoryAllocator::Reference ref =
-        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
-                                                     &found_id);
-
-    // Stop immediately if there are none.
-    if (!ref)
-      break;
-
-    // The sample-record could be for any sparse histogram. Add the reference
-    // to the appropriate collection for later use.
-    if (found_id == match_id) {
-      sample_map_records->records_.push_back(ref);
-      found = true;
-    } else {
-      PersistentSampleMapRecords* samples =
-          GetSampleMapRecordsWhileLocked(found_id);
-      DCHECK(samples);
-      samples->found_.push_back(ref);
-    }
-  }
-
-  return found;
-}
-
-
-PersistentSampleMapRecords::PersistentSampleMapRecords(
-    PersistentSparseHistogramDataManager* data_manager,
-    uint64_t sample_map_id)
-    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}
-
-PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;
-
-PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
-    const void* user) {
-  DCHECK(!user_);
-  user_ = user;
-  seen_ = 0;
-  return this;
-}
-
-void PersistentSampleMapRecords::Release(const void* user) {
-  DCHECK_EQ(user_, user);
-  user_ = nullptr;
-}
-
-PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
-  DCHECK(user_);
-
-  // If there are no unseen records, lock and swap in all the found ones.
-  if (records_.size() == seen_) {
-    if (!data_manager_->LoadRecords(this))
-      return 0;  // Reference is an integer type; 0 is the null reference.
-  }
-
-  // Return the next record. Records *must* be returned in the same order
-  // they are found in the persistent memory in order to ensure that all
-  // objects using this data always have the same state. Race conditions
-  // can cause duplicate records so using the "first found" is the only
-  // guarantee that all objects always access the same one.
-  DCHECK_LT(seen_, records_.size());
-  return records_[seen_++];
-}
-
-PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
-    HistogramBase::Sample value) {
-  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
-                                                     sample_map_id_, value);
-}
-
-
-// This data will be held in persistent memory in order for processes to
-// locate and use histograms created elsewhere.
-struct PersistentHistogramAllocator::PersistentHistogramData {
-  // SHA1(Histogram): Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;
-
-  // Expected size for 32/64-bit check.
-  static constexpr size_t kExpectedInstanceSize =
-      40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;
-
-  int32_t histogram_type;
-  int32_t flags;
-  int32_t minimum;
-  int32_t maximum;
-  uint32_t bucket_count;
-  PersistentMemoryAllocator::Reference ranges_ref;
-  uint32_t ranges_checksum;
-  subtle::Atomic32 counts_ref;  // PersistentMemoryAllocator::Reference
-  HistogramSamples::Metadata samples_metadata;
-  HistogramSamples::Metadata logged_metadata;
-
-  // Space for the histogram name will be added during the actual allocation
-  // request. This must be the last field of the structure. A zero-size array
-  // or a "flexible" array would be preferred but is not (yet) valid C++.
-  char name[sizeof(uint64_t)];  // Force 64-bit alignment on 32-bit builds.
-};
-
-PersistentHistogramAllocator::Iterator::Iterator(
-    PersistentHistogramAllocator* allocator)
-    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}
-
-std::unique_ptr<HistogramBase>
-PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
-  PersistentMemoryAllocator::Reference ref;
-  while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
-    if (ref != ignore)
-      return allocator_->GetHistogram(ref);
-  }
-  return nullptr;
-}
-
-
-PersistentHistogramAllocator::PersistentHistogramAllocator(
-    std::unique_ptr<PersistentMemoryAllocator> memory)
-    : memory_allocator_(std::move(memory)),
-      sparse_histogram_data_manager_(memory_allocator_.get()) {}
-
-PersistentHistogramAllocator::~PersistentHistogramAllocator() = default;
-
-std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
-    Reference ref) {
-  // Unfortunately, the histogram "pickle" methods cannot be used as part of
-  // the persistence because the deserialization methods always create local
-  // count data (while these must reference the persistent counts) and always
-  // add it to the local list of known histograms (while these may be simple
-  // references to histograms in other processes).
-  PersistentHistogramData* data =
-      memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
-  const size_t length = memory_allocator_->GetAllocSize(ref);
-
-  // Check that metadata is reasonable: name is null-terminated and non-empty,
-  // ID fields have been loaded with a hash of the name (0 is considered
-  // unset/invalid).
-  if (!data || data->name[0] == '\0' ||
-      reinterpret_cast<char*>(data)[length - 1] != '\0' ||
-      data->samples_metadata.id == 0 || data->logged_metadata.id == 0 ||
-      // Note: Sparse histograms use |id + 1| in |logged_metadata|.
-      (data->logged_metadata.id != data->samples_metadata.id &&
-       data->logged_metadata.id != data->samples_metadata.id + 1) ||
-      // Most non-matching values happen due to truncated names. Ideally, we
-      // could just verify the name length based on the overall alloc length,
-      // but that doesn't work because the allocated block may have been
-      // aligned to the next boundary value.
-      HashMetricName(data->name) != data->samples_metadata.id) {
-    NOTREACHED();
-    return nullptr;
-  }
-  return CreateHistogram(data);
-}
-
-std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
-    HistogramType histogram_type,
-    const std::string& name,
-    int minimum,
-    int maximum,
-    const BucketRanges* bucket_ranges,
-    int32_t flags,
-    Reference* ref_ptr) {
-  // If the allocator is corrupt, don't waste time trying anything else.
-  // This also allows differentiating on the dashboard between allocations
-  // that failed due to a corrupt allocator and the number of process
-  // instances with one, the latter being indicated by "newly corrupt", below.
-  if (memory_allocator_->IsCorrupt())
-    return nullptr;
-
-  // Create the metadata necessary for a persistent sparse histogram. This
-  // is done first because it is a small subset of what is required for
-  // other histograms. The type is "under construction" so that a crash
-  // during the datafill doesn't leave a bad record around that could cause
-  // confusion by another process trying to read it. It will be corrected
-  // once histogram construction is complete.
-  PersistentHistogramData* histogram_data =
-      memory_allocator_->New<PersistentHistogramData>(
-          offsetof(PersistentHistogramData, name) + name.length() + 1);
-  if (histogram_data) {
-    memcpy(histogram_data->name, name.c_str(), name.size() + 1);
-    histogram_data->histogram_type = histogram_type;
-    histogram_data->flags = flags | HistogramBase::kIsPersistent;
-  }
-
-  // Create the remaining metadata necessary for regular histograms.
-  if (histogram_type != SPARSE_HISTOGRAM) {
-    size_t bucket_count = bucket_ranges->bucket_count();
-    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
-    if (counts_bytes == 0) {
-      // |bucket_count| was out-of-range.
-      NOTREACHED();
-      return nullptr;
-    }
-
-    // Since the StatisticsRecorder keeps a global collection of BucketRanges
-    // objects for re-use, it would be dangerous for one to hold a reference
-    // from a persistent allocator that is not the global one (which is
-    // permanent once set). If this stops being the case, this check can
-    // become an "if" condition beside "!ranges_ref" below and before
-    // set_persistent_reference() farther down.
-    DCHECK_EQ(this, GlobalHistogramAllocator::Get());
-
-    // Re-use an existing BucketRanges persistent allocation if one is known;
-    // otherwise, create one.
-    PersistentMemoryAllocator::Reference ranges_ref =
-        bucket_ranges->persistent_reference();
-    if (!ranges_ref) {
-      size_t ranges_count = bucket_count + 1;
-      size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
-      ranges_ref =
-          memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
-      if (ranges_ref) {
-        HistogramBase::Sample* ranges_data =
-            memory_allocator_->GetAsArray<HistogramBase::Sample>(
-                ranges_ref, kTypeIdRangesArray, ranges_count);
-        if (ranges_data) {
-          for (size_t i = 0; i < bucket_ranges->size(); ++i)
-            ranges_data[i] = bucket_ranges->range(i);
-          bucket_ranges->set_persistent_reference(ranges_ref);
-        } else {
-          // This should never happen but be tolerant if it does.
-          NOTREACHED();
-          ranges_ref = PersistentMemoryAllocator::kReferenceNull;
-        }
-      }
-    } else {
-      DCHECK_EQ(kTypeIdRangesArray, memory_allocator_->GetType(ranges_ref));
-    }
-
-    // Only continue here if all allocations were successful. If they weren't,
-    // there is no way to free the space but that's not really a problem since
-    // the allocations only fail because the space is full or corrupt and so
-    // any future attempts will also fail.
-    if (ranges_ref && histogram_data) {
-      histogram_data->minimum = minimum;
-      histogram_data->maximum = maximum;
-      // |bucket_count| must fit within 32-bits or the allocation of the counts
-      // array would have failed for being too large; the allocator supports
-      // less than 4GB total size.
-      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
-      histogram_data->ranges_ref = ranges_ref;
-      histogram_data->ranges_checksum = bucket_ranges->checksum();
-    } else {
-      histogram_data = nullptr;  // Clear this for proper handling below.
-    }
-  }
-
-  if (histogram_data) {
-    // Create the histogram using resources in persistent memory. This ends up
-    // resolving the "ref" values stored in histogram_data instead of just
-    // using what is already known above but avoids duplicating the switch
-    // statement here and serves as a double-check that everything is
-    // correct before committing the new histogram to persistent space.
-    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
-    DCHECK(histogram);
-    DCHECK_NE(0U, histogram_data->samples_metadata.id);
-    DCHECK_NE(0U, histogram_data->logged_metadata.id);
-
-    PersistentMemoryAllocator::Reference histogram_ref =
-        memory_allocator_->GetAsReference(histogram_data);
-    if (ref_ptr != nullptr)
-      *ref_ptr = histogram_ref;
-
-    // By storing the reference within the allocator to this histogram, the
-    // next import (which will happen before the next histogram creation)
-    // will know to skip it.
-    // See also the comment in ImportHistogramsToStatisticsRecorder().
-    subtle::NoBarrier_Store(&last_created_, histogram_ref);
-    return histogram;
-  }
-
-  if (memory_allocator_->IsCorrupt())
-    NOTREACHED() << memory_allocator_->Name() << " is corrupt!";
-
-  return nullptr;
-}
-
-void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
-                                                     bool registered) {
-  if (registered) {
-    // If the created persistent histogram was registered then it needs to
-    // be marked as "iterable" in order to be found by other processes. This
-    // happens only after the histogram is fully formed so it's impossible for
-    // code iterating through the allocator to read a partially created record.
-    memory_allocator_->MakeIterable(ref);
-  } else {
-    // If it wasn't registered then a race condition must have caused two to
-    // be created. The allocator does not support releasing the acquired memory
-    // so just change the type to be empty.
-    memory_allocator_->ChangeType(ref, 0,
-                                  PersistentHistogramData::kPersistentTypeId,
-                                  /*clear=*/false);
-  }
-}
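A minimal sketch of the allocate-then-finalize handshake above, assuming a
configured |allocator|, a pre-registered |ranges| object, and the usual
StatisticsRecorder registration step (metric name and flags are illustrative):

    PersistentHistogramAllocator::Reference ref = 0;
    std::unique_ptr<HistogramBase> tentative = allocator->AllocateHistogram(
        HISTOGRAM, "My.Metric", /*minimum=*/1, /*maximum=*/100, ranges,
        HistogramBase::kUmaTargetedHistogramFlag, &ref);
    if (tentative) {
      // Registration decides whether the record becomes iterable (visible
      // to other processes) or is abandoned as the loser of a creation race.
      HistogramBase* raw = tentative.release();
      HistogramBase* winner = StatisticsRecorder::RegisterOrDeleteDuplicate(raw);
      allocator->FinalizeHistogram(ref, /*registered=*/winner == raw);
    }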
-
-void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
-    HistogramBase* histogram) {
-  DCHECK(histogram);
-
-  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
-  if (!existing) {
-    // The above should never fail but if it does, no real harm is done.
-    // The data won't be merged but it also won't be recorded as merged
-    // so a future try, if successful, will get what was missed. If it
-    // continues to fail, some metric data will be lost but that is better
-    // than crashing.
-    NOTREACHED();
-    return;
-  }
-
-  // Merge the delta from the passed object to the one in the SR.
-  existing->AddSamples(*histogram->SnapshotDelta());
-}
-
-void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
-    const HistogramBase* histogram) {
-  DCHECK(histogram);
-
-  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
-  if (!existing) {
-    // The above should never fail but if it does, no real harm is done.
-    // Some metric data will be lost but that is better than crashing.
-    NOTREACHED();
-    return;
-  }
-
-  // Merge the delta from the passed object to the one in the SR.
-  existing->AddSamples(*histogram->SnapshotFinalDelta());
-}
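As a usage sketch, an embedder holding an allocator attached to another
process's memory segment might merge all pending deltas in one pass (the
Iterator API is declared in the header below; the wrapper function is
illustrative):

    void MergeAllDeltas(PersistentHistogramAllocator* allocator) {
      PersistentHistogramAllocator::Iterator iter(allocator);
      while (std::unique_ptr<HistogramBase> histogram = iter.GetNext())
        allocator->MergeHistogramDeltaToStatisticsRecorder(histogram.get());
    }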
-
-PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
-    uint64_t id,
-    const void* user) {
-  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
-}
-
-void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
-  memory_allocator_->CreateTrackingHistograms(name);
-}
-
-void PersistentHistogramAllocator::UpdateTrackingHistograms() {
-  memory_allocator_->UpdateTrackingHistograms();
-}
-
-void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
-  subtle::NoBarrier_Store(&last_created_, 0);
-}
-
-std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
-    PersistentHistogramData* histogram_data_ptr) {
-  if (!histogram_data_ptr) {
-    NOTREACHED();
-    return nullptr;
-  }
-
-  // Sparse histograms are quite different so handle them as a special case.
-  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
-    std::unique_ptr<HistogramBase> histogram =
-        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
-                                          &histogram_data_ptr->samples_metadata,
-                                          &histogram_data_ptr->logged_metadata);
-    DCHECK(histogram);
-    histogram->SetFlags(histogram_data_ptr->flags);
-    return histogram;
-  }
-
-  // Copy the configuration fields from histogram_data_ptr to local storage
-  // because anything in persistent memory cannot be trusted as it could be
-  // changed at any moment by a malicious actor that shares access. The local
-  // values are validated below and then used to create the histogram, knowing
-  // they haven't changed between validation and use.
-  int32_t histogram_type = histogram_data_ptr->histogram_type;
-  int32_t histogram_flags = histogram_data_ptr->flags;
-  int32_t histogram_minimum = histogram_data_ptr->minimum;
-  int32_t histogram_maximum = histogram_data_ptr->maximum;
-  uint32_t histogram_bucket_count = histogram_data_ptr->bucket_count;
-  uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref;
-  uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum;
-
-  HistogramBase::Sample* ranges_data =
-      memory_allocator_->GetAsArray<HistogramBase::Sample>(
-          histogram_ranges_ref, kTypeIdRangesArray,
-          PersistentMemoryAllocator::kSizeAny);
-
-  const uint32_t max_buckets =
-      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
-  size_t required_bytes =
-      (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample);
-  size_t allocated_bytes =
-      memory_allocator_->GetAllocSize(histogram_ranges_ref);
-  if (!ranges_data || histogram_bucket_count < 2 ||
-      histogram_bucket_count >= max_buckets ||
-      allocated_bytes < required_bytes) {
-    NOTREACHED();
-    return nullptr;
-  }
-
-  std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
-      ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
-  if (!created_ranges) {
-    NOTREACHED();
-    return nullptr;
-  }
-  const BucketRanges* ranges =
-      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
-          created_ranges.release());
-
-  size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
-  PersistentMemoryAllocator::Reference counts_ref =
-      subtle::Acquire_Load(&histogram_data_ptr->counts_ref);
-  if (counts_bytes == 0 ||
-      (counts_ref != 0 &&
-       memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
-    NOTREACHED();
-    return nullptr;
-  }
-
-  // The "counts" data (including both samples and logged samples) is a delayed
-  // persistent allocation meaning that though its size and storage for a
-  // reference is defined, no space is reserved until actually needed. When
-  // it is needed, memory will be allocated from the persistent segment and
-  // a reference to it stored at the passed address. Other threads can then
-  // notice the valid reference and access the same data.
-  DelayedPersistentAllocation counts_data(memory_allocator_.get(),
-                                          &histogram_data_ptr->counts_ref,
-                                          kTypeIdCountsArray, counts_bytes, 0);
-
-  // A second delayed allocation is defined using the same reference storage
-  // location as the first so the allocation of one will automatically be found
-  // by the other. Within the block, the first half of the space is for "counts"
-  // and the second half is for "logged counts".
-  DelayedPersistentAllocation logged_data(
-      memory_allocator_.get(), &histogram_data_ptr->counts_ref,
-      kTypeIdCountsArray, counts_bytes, counts_bytes / 2,
-      /*make_iterable=*/false);
-
-  // Create the right type of histogram.
-  const char* name = histogram_data_ptr->name;
-  std::unique_ptr<HistogramBase> histogram;
-  switch (histogram_type) {
-    case HISTOGRAM:
-      histogram = Histogram::PersistentCreate(
-          name, histogram_minimum, histogram_maximum, ranges, counts_data,
-          logged_data, &histogram_data_ptr->samples_metadata,
-          &histogram_data_ptr->logged_metadata);
-      DCHECK(histogram);
-      break;
-    case LINEAR_HISTOGRAM:
-      histogram = LinearHistogram::PersistentCreate(
-          name, histogram_minimum, histogram_maximum, ranges, counts_data,
-          logged_data, &histogram_data_ptr->samples_metadata,
-          &histogram_data_ptr->logged_metadata);
-      DCHECK(histogram);
-      break;
-    case BOOLEAN_HISTOGRAM:
-      histogram = BooleanHistogram::PersistentCreate(
-          name, ranges, counts_data, logged_data,
-          &histogram_data_ptr->samples_metadata,
-          &histogram_data_ptr->logged_metadata);
-      DCHECK(histogram);
-      break;
-    case CUSTOM_HISTOGRAM:
-      histogram = CustomHistogram::PersistentCreate(
-          name, ranges, counts_data, logged_data,
-          &histogram_data_ptr->samples_metadata,
-          &histogram_data_ptr->logged_metadata);
-      DCHECK(histogram);
-      break;
-    default:
-      NOTREACHED();
-  }
-
-  if (histogram) {
-    DCHECK_EQ(histogram_type, histogram->GetHistogramType());
-    histogram->SetFlags(histogram_flags);
-  }
-
-  return histogram;
-}
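To make the shared counts layout concrete: the two delayed allocations above
resolve to the two halves of a single block, assuming
CalculateRequiredCountsBytes() reserves space for both the live and the
logged samples:

    counts_ref --> [ samples: counts_bytes/2 bytes | logged: counts_bytes/2 bytes ]
                   ^ offset 0                        ^ offset counts_bytes/2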
-
-HistogramBase*
-PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
-    const HistogramBase* histogram) {
-  // This should never be called on the global histogram allocator as objects
-  // created there are already within the global statistics recorder.
-  DCHECK_NE(GlobalHistogramAllocator::Get(), this);
-  DCHECK(histogram);
-
-  HistogramBase* existing =
-      StatisticsRecorder::FindHistogram(histogram->histogram_name());
-  if (existing)
-    return existing;
-
-  // Adding the passed histogram to the SR would cause a problem if the
-  // allocator that holds it eventually goes away. Instead, create a new
-  // one from a serialized version. Deserialization calls the appropriate
-  // FactoryGet() which will create the histogram in the global persistent-
-  // histogram allocator if such is set.
-  base::Pickle pickle;
-  histogram->SerializeInfo(&pickle);
-  PickleIterator iter(pickle);
-  existing = DeserializeHistogramInfo(&iter);
-  if (!existing)
-    return nullptr;
-
-  // Make sure there is no "serialization" flag set.
-  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
-  // Record the newly created histogram in the SR.
-  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
-}
-
-GlobalHistogramAllocator::~GlobalHistogramAllocator() = default;
-
-// static
-void GlobalHistogramAllocator::CreateWithPersistentMemory(
-    void* base,
-    size_t size,
-    size_t page_size,
-    uint64_t id,
-    StringPiece name) {
-  Set(WrapUnique(
-      new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
-          base, size, page_size, id, name, false))));
-}
-
-// static
-void GlobalHistogramAllocator::CreateWithLocalMemory(
-    size_t size,
-    uint64_t id,
-    StringPiece name) {
-  Set(WrapUnique(new GlobalHistogramAllocator(
-      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name))));
-}
-
-#if !defined(OS_NACL)
-// static
-bool GlobalHistogramAllocator::CreateWithFile(
-    const FilePath& file_path,
-    size_t size,
-    uint64_t id,
-    StringPiece name) {
-  bool exists = PathExists(file_path);
-  File file(
-      file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
-                 File::FLAG_READ | File::FLAG_WRITE);
-
-  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
-  if (exists) {
-    size = saturated_cast<size_t>(file.GetLength());
-    mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
-  } else {
-    mmfile->Initialize(std::move(file), {0, size},
-                       MemoryMappedFile::READ_WRITE_EXTEND);
-  }
-  if (!mmfile->IsValid() ||
-      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
-    NOTREACHED() << file_path;
-    return false;
-  }
-
-  Set(WrapUnique(new GlobalHistogramAllocator(
-      std::make_unique<FilePersistentMemoryAllocator>(std::move(mmfile), size,
-                                                      id, name, false))));
-  Get()->SetPersistentLocation(file_path);
-  return true;
-}
-
-// static
-bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
-                                                    const FilePath& active_path,
-                                                    const FilePath& spare_path,
-                                                    size_t size,
-                                                    uint64_t id,
-                                                    StringPiece name) {
-  // Old "active" becomes "base".
-  if (!base::ReplaceFile(active_path, base_path, nullptr))
-    base::DeleteFile(base_path, /*recursive=*/false);
-  DCHECK(!base::PathExists(active_path));
-
-  // Move any "spare" into "active". Okay to continue if file doesn't exist.
-  if (!spare_path.empty()) {
-    base::ReplaceFile(spare_path, active_path, nullptr);
-    DCHECK(!base::PathExists(spare_path));
-  }
-
-  return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
-                                                        name);
-}
-
-// static
-bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
-                                                         size_t size,
-                                                         uint64_t id,
-                                                         StringPiece name) {
-  FilePath base_path, active_path, spare_path;
-  ConstructFilePaths(dir, name, &base_path, &active_path, &spare_path);
-  return CreateWithActiveFile(base_path, active_path, spare_path, size, id,
-                              name);
-}
-
-// static
-FilePath GlobalHistogramAllocator::ConstructFilePath(const FilePath& dir,
-                                                     StringPiece name) {
-  return dir.AppendASCII(name).AddExtension(
-      PersistentMemoryAllocator::kFileExtension);
-}
-
-// static
-FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
-    const FilePath& dir,
-    StringPiece name,
-    base::Time stamp,
-    ProcessId pid) {
-  return ConstructFilePath(
-      dir,
-      StringPrintf("%.*s-%lX-%lX", static_cast<int>(name.length()), name.data(),
-                   static_cast<long>(stamp.ToTimeT()), static_cast<long>(pid)));
-}
-
-// static
-bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
-                                             std::string* out_name,
-                                             Time* out_stamp,
-                                             ProcessId* out_pid) {
-  std::string filename = path.BaseName().AsUTF8Unsafe();
-  std::vector<base::StringPiece> parts = base::SplitStringPiece(
-      filename, "-.", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
-  if (parts.size() != 4)
-    return false;
-
-  if (out_name)
-    *out_name = parts[0].as_string();
-
-  if (out_stamp) {
-    int64_t stamp;
-    if (!HexStringToInt64(parts[1], &stamp))
-      return false;
-    *out_stamp = Time::FromTimeT(static_cast<time_t>(stamp));
-  }
-
-  if (out_pid) {
-    int64_t pid;
-    if (!HexStringToInt64(parts[2], &pid))
-      return false;
-    *out_pid = static_cast<ProcessId>(pid);
-  }
-
-  return true;
-}
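A hypothetical round trip through the two helpers above (hex values are
illustrative; the extension is assumed to be
PersistentMemoryAllocator::kFileExtension):

    // ConstructFilePathForUploadDir(dir, "BrowserMetrics", stamp, pid)
    //   -> dir/BrowserMetrics-5B3C1A2F-12A0.pma
    std::string name;
    Time stamp;
    ProcessId pid;
    bool ok = GlobalHistogramAllocator::ParseFilePath(
        FilePath(FILE_PATH_LITERAL("BrowserMetrics-5B3C1A2F-12A0.pma")),
        &name, &stamp, &pid);
    // ok == true, name == "BrowserMetrics"; stamp and pid decoded from hex.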
-
-// static
-void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
-                                                  StringPiece name,
-                                                  FilePath* out_base_path,
-                                                  FilePath* out_active_path,
-                                                  FilePath* out_spare_path) {
-  if (out_base_path)
-    *out_base_path = ConstructFilePath(dir, name);
-
-  if (out_active_path) {
-    *out_active_path =
-        ConstructFilePath(dir, name.as_string().append("-active"));
-  }
-
-  if (out_spare_path) {
-    *out_spare_path = ConstructFilePath(dir, name.as_string().append("-spare"));
-  }
-}
-
-// static
-void GlobalHistogramAllocator::ConstructFilePathsForUploadDir(
-    const FilePath& active_dir,
-    const FilePath& upload_dir,
-    const std::string& name,
-    FilePath* out_upload_path,
-    FilePath* out_active_path,
-    FilePath* out_spare_path) {
-  if (out_upload_path) {
-    *out_upload_path = ConstructFilePathForUploadDir(
-        upload_dir, name, Time::Now(), GetCurrentProcId());
-  }
-
-  if (out_active_path) {
-    *out_active_path =
-        ConstructFilePath(active_dir, name + std::string("-active"));
-  }
-
-  if (out_spare_path) {
-    *out_spare_path =
-        ConstructFilePath(active_dir, name + std::string("-spare"));
-  }
-}
-
-// static
-bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
-                                               size_t size) {
-  FilePath temp_spare_path = spare_path.AddExtension(FILE_PATH_LITERAL(".tmp"));
-  bool success = true;
-  {
-    File spare_file(temp_spare_path, File::FLAG_CREATE_ALWAYS |
-                                         File::FLAG_READ | File::FLAG_WRITE);
-    if (!spare_file.IsValid())
-      return false;
-
-    MemoryMappedFile mmfile;
-    mmfile.Initialize(std::move(spare_file), {0, size},
-                      MemoryMappedFile::READ_WRITE_EXTEND);
-    success = mmfile.IsValid();
-  }
-
-  if (success)
-    success = ReplaceFile(temp_spare_path, spare_path, nullptr);
-
-  if (!success)
-    DeleteFile(temp_spare_path, /*recursive=*/false);
-
-  return success;
-}
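A plausible startup sequence using the spare-file machinery (directory, size,
and name values are illustrative):

    // Sometime before the next launch, on a background thread:
    GlobalHistogramAllocator::CreateSpareFileInDir(metrics_dir, 4 << 20,
                                                   "BrowserMetrics");
    // On the next launch, the spare is renamed into place as the active
    // file, avoiding the cost of extending a fresh file during startup:
    GlobalHistogramAllocator::CreateWithActiveFileInDir(metrics_dir, 4 << 20,
                                                        /*id=*/0,
                                                        "BrowserMetrics");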
-
-// static
-bool GlobalHistogramAllocator::CreateSpareFileInDir(const FilePath& dir,
-                                                    size_t size,
-                                                    StringPiece name) {
-  FilePath spare_path;
-  ConstructFilePaths(dir, name, nullptr, nullptr, &spare_path);
-  return CreateSpareFile(spare_path, size);
-}
-#endif  // !defined(OS_NACL)
-
-// static
-void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
-    const SharedMemoryHandle& handle,
-    size_t size) {
-  std::unique_ptr<SharedMemory> shm(
-      new SharedMemory(handle, /*readonly=*/false));
-  if (!shm->Map(size) ||
-      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
-    NOTREACHED();
-    return;
-  }
-
-  Set(WrapUnique(new GlobalHistogramAllocator(
-      std::make_unique<SharedPersistentMemoryAllocator>(
-          std::move(shm), 0, StringPiece(), /*readonly=*/false))));
-}
-
-// static
-void GlobalHistogramAllocator::Set(
-    std::unique_ptr<GlobalHistogramAllocator> allocator) {
-  // Releasing or changing an allocator is extremely dangerous because it
-  // likely has histograms stored within it. If the backing memory is
-  // also released, future accesses to those histograms will seg-fault.
-  CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
-  subtle::Release_Store(&g_histogram_allocator,
-                        reinterpret_cast<uintptr_t>(allocator.release()));
-  size_t existing = StatisticsRecorder::GetHistogramCount();
-
-  DVLOG_IF(1, existing)
-      << existing << " histograms were created before persistence was enabled.";
-}
-
-// static
-GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
-  return reinterpret_cast<GlobalHistogramAllocator*>(
-      subtle::Acquire_Load(&g_histogram_allocator));
-}
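Typical early-startup publication goes through one of the Create* helpers,
each of which calls Set() internally (size and name are illustrative):

    GlobalHistogramAllocator::CreateWithLocalMemory(1 << 20, /*id=*/0,
                                                    "TestMetrics");
    GlobalHistogramAllocator* global = GlobalHistogramAllocator::Get();
    DCHECK(global);  // Histogram macros now allocate from |global|.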
-
-// static
-std::unique_ptr<GlobalHistogramAllocator>
-GlobalHistogramAllocator::ReleaseForTesting() {
-  GlobalHistogramAllocator* histogram_allocator = Get();
-  if (!histogram_allocator)
-    return nullptr;
-  PersistentMemoryAllocator* memory_allocator =
-      histogram_allocator->memory_allocator();
-
-  // Before releasing the memory, it's necessary to have the Statistics-
-  // Recorder forget about the histograms contained therein; otherwise,
-  // some operations will try to access them and the released memory.
-  PersistentMemoryAllocator::Iterator iter(memory_allocator);
-  const PersistentHistogramData* data;
-  while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
-    StatisticsRecorder::ForgetHistogramForTesting(data->name);
-  }
-
-  subtle::Release_Store(&g_histogram_allocator, 0);
-  return WrapUnique(histogram_allocator);
-}
-
-void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
-  persistent_location_ = location;
-}
-
-const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
-  return persistent_location_;
-}
-
-bool GlobalHistogramAllocator::WriteToPersistentLocation() {
-#if defined(OS_NACL)
-  // NACL doesn't support file operations, including ImportantFileWriter.
-  NOTREACHED();
-  return false;
-#else
-  // Stop if no destination is set.
-  if (persistent_location_.empty()) {
-    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
-                 << " to file because no location was set.";
-    return false;
-  }
-
-  StringPiece contents(static_cast<const char*>(data()), used());
-  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
-                                                contents)) {
-    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
-               << " to file: " << persistent_location_.value();
-    return false;
-  }
-
-  return true;
-#endif
-}
-
-void GlobalHistogramAllocator::DeletePersistentLocation() {
-  memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
-
-#if defined(OS_NACL)
-  NOTREACHED();
-#else
-  if (persistent_location_.empty())
-    return;
-
-  // Open (with delete) and then immediately close the file by going out of
-  // scope. This is the only cross-platform safe way to delete a file that may
-  // be open elsewhere. Open handles will continue to operate normally but
-  // new opens will not be possible.
-  File file(persistent_location_,
-            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
-#endif
-}
-
-GlobalHistogramAllocator::GlobalHistogramAllocator(
-    std::unique_ptr<PersistentMemoryAllocator> memory)
-    : PersistentHistogramAllocator(std::move(memory)),
-      import_iterator_(this) {
-}
-
-void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
-  // Skip the import if it's the histogram that was last created. Should a
-  // race condition cause the "last created" to be overwritten before it
-  // is recognized here then the histogram will be created and be ignored
-  // when it is detected as a duplicate by the statistics-recorder. This
-  // simple check reduces the time of creating persistent histograms by
-  // about 40%.
-  Reference record_to_ignore = last_created();
-
-  // There is no lock on this because the iterator is lock-free while still
-  // guaranteed to only return each entry only once. The StatisticsRecorder
-  // has its own lock so the Register operation is safe.
-  while (true) {
-    std::unique_ptr<HistogramBase> histogram =
-        import_iterator_.GetNextWithIgnore(record_to_ignore);
-    if (!histogram)
-      break;
-    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
-  }
-}
-
-}  // namespace base
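For context, a sketch of the assumed trigger point: the StatisticsRecorder is
expected to invoke the import before each factory lookup so histograms created
by other processes surface as duplicates rather than fresh objects:

    if (GlobalHistogramAllocator* global = GlobalHistogramAllocator::Get())
      global->ImportHistogramsToStatisticsRecorder();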
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
deleted file mode 100644
index 395511f..0000000
--- a/base/metrics/persistent_histogram_allocator.h
+++ /dev/null
@@ -1,505 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
-#define BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
-
-#include <map>
-#include <memory>
-
-#include "base/atomicops.h"
-#include "base/base_export.h"
-#include "base/feature_list.h"
-#include "base/memory/shared_memory.h"
-#include "base/metrics/histogram_base.h"
-#include "base/metrics/persistent_memory_allocator.h"
-#include "base/strings/string_piece.h"
-#include "base/synchronization/lock.h"
-
-namespace base {
-
-class BucketRanges;
-class FilePath;
-class PersistentSampleMapRecords;
-class PersistentSparseHistogramDataManager;
-
-// Feature definition for enabling histogram persistence.
-BASE_EXPORT extern const Feature kPersistentHistogramsFeature;
-
-
-// A data manager for sparse histograms so that each instance doesn't have
-// to separately iterate over the entire memory segment. Though this class
-// will generally be accessed through the PersistentHistogramAllocator below,
-// it can be used independently on any PersistentMemoryAllocator (making it
-// usable for testing). This object supports only one instance of a sparse
-// histogram for a given id. Tests that create multiple identical histograms,
-// perhaps to simulate multiple processes, should create a separate manager
-// for each.
-class BASE_EXPORT PersistentSparseHistogramDataManager {
- public:
-  // Constructs the data manager. The allocator must live longer than any
-  // managers that reference it.
-  explicit PersistentSparseHistogramDataManager(
-      PersistentMemoryAllocator* allocator);
-
-  ~PersistentSparseHistogramDataManager();
-
-  // Returns the object that manages the persistent-sample-map records for a
-  // given |id|. Only one |user| of this data is allowed at a time. This does
-  // an automatic Acquire() on the records. The user must call Release() on
-  // the returned object when it is finished with it. Ownership of the records
-  // object stays with this manager.
-  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
-                                                  const void* user);
-
-  // Convenience method that gets the object for a given reference so callers
-  // don't have to also keep their own pointer to the appropriate allocator.
-  template <typename T>
-  T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
-    return allocator_->GetAsObject<T>(ref);
-  }
-
- private:
-  friend class PersistentSampleMapRecords;
-
-  // Gets the object holding records for a given sample-map id when |lock_|
-  // has already been acquired.
-  PersistentSampleMapRecords* GetSampleMapRecordsWhileLocked(uint64_t id);
-
-  // Loads sample-map records looking for those belonging to the specified
-  // |load_id|. Records found for other sample-maps are held for later use
-  // without having to iterate again. This should be called only from a
-  // PersistentSampleMapRecords object because those objects have a contract
-  // that there are no other threads accessing the internal records_ field
-  // of the object that is passed in.
-  bool LoadRecords(PersistentSampleMapRecords* sample_map_records);
-
-  // Weak-pointer to the allocator used by the sparse histograms.
-  PersistentMemoryAllocator* allocator_;
-
-  // Iterator within the allocator for finding sample records.
-  PersistentMemoryAllocator::Iterator record_iterator_;
-
-  // Mapping of sample-map IDs to their sample records.
-  std::map<uint64_t, std::unique_ptr<PersistentSampleMapRecords>>
-      sample_records_;
-
-  // A lock used for synchronizing changes to sample_records_.
-  base::Lock lock_;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentSparseHistogramDataManager);
-};
-
-
-// This class manages sample-records used by a PersistentSampleMap container
-// that underlies a persistent SparseHistogram object. It is broken out into a
-// top-level class so that it can be forward-declared in other header files
-// rather than include this entire file as would be necessary if it were
-// declared within the PersistentSparseHistogramDataManager class above.
-class BASE_EXPORT PersistentSampleMapRecords {
- public:
-  // Constructs an instance of this class. The manager object must live longer
-  // than all instances of this class that reference it, which is not usually
-  // a problem since these objects are generally managed from within that
-  // manager instance.
-  PersistentSampleMapRecords(PersistentSparseHistogramDataManager* data_manager,
-                             uint64_t sample_map_id);
-
-  ~PersistentSampleMapRecords();
-
-  // Resets the internal state for a new object using this data. The return
-  // value is "this" as a convenience.
-  PersistentSampleMapRecords* Acquire(const void* user);
-
-  // Indicates that the using object is done with this data.
-  void Release(const void* user);
-
-  // Gets the next reference to a persistent sample-map record. The type and
-  // layout of the data being referenced is defined entirely within the
-  // PersistentSampleMap class.
-  PersistentMemoryAllocator::Reference GetNext();
-
-  // Creates a new persistent sample-map record for sample |value| and returns
-  // a reference to it.
-  PersistentMemoryAllocator::Reference CreateNew(HistogramBase::Sample value);
-
-  // Convenience method that gets the object for a given reference so callers
-  // don't have to also keep their own pointer to the appropriate allocator.
-  // This is expected to be used with the SampleRecord structure defined inside
-  // the persistent_sample_map.cc file but since that isn't exported (for
-  // cleanliness of the interface), a template is defined that will be
-  // resolved when used inside that file.
-  template <typename T>
-  T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
-    return data_manager_->GetAsObject<T>(ref);
-  }
-
- private:
-  friend PersistentSparseHistogramDataManager;
-
-  // Weak-pointer to the parent data-manager object.
-  PersistentSparseHistogramDataManager* data_manager_;
-
-  // ID of PersistentSampleMap to which these records apply.
-  const uint64_t sample_map_id_;
-
-  // The current user of this set of records. It is used to ensure that no
-  // more than one object is using these records at a given time.
-  const void* user_ = nullptr;
-
-  // This is the count of how many "records" have already been read by the
-  // owning sample-map.
-  size_t seen_ = 0;
-
-  // This is the set of records previously found for a sample map. Because
-  // there is ever only one object with a given ID (typically a hash of a
-  // histogram name) and because the parent SparseHistogram has acquired
-  // its own lock before accessing the PersistentSampleMap it controls, this
-  // list can be accessed without acquiring any additional lock.
-  std::vector<PersistentMemoryAllocator::Reference> records_;
-
-  // This is the set of records found during iteration through memory. It
-  // is appended in bulk to "records". Access to this vector can be done
-  // only while holding the parent manager's lock.
-  std::vector<PersistentMemoryAllocator::Reference> found_;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMapRecords);
-};
-
-
-// This class manages histograms created within a PersistentMemoryAllocator.
-class BASE_EXPORT PersistentHistogramAllocator {
- public:
-  // A reference to a histogram. While this is implemented as PMA::Reference,
-  // it is not conceptually the same thing. Outside callers should always use
-  // a Reference matching the class it is for and not mix the two.
-  using Reference = PersistentMemoryAllocator::Reference;
-
-  // Iterator used for fetching persistent histograms from an allocator.
-  // It is lock-free and thread-safe.
-  // See PersistentMemoryAllocator::Iterator for more information.
-  class BASE_EXPORT Iterator {
-   public:
-    // Constructs an iterator on a given |allocator|, starting at the beginning.
-    // The allocator must live beyond the lifetime of the iterator.
-    explicit Iterator(PersistentHistogramAllocator* allocator);
-
-    // Gets the next histogram from persistent memory; returns null if there
-    // are no more histograms to be found. This may still be called again
-    // later to retrieve any new histograms added in the meantime.
-    std::unique_ptr<HistogramBase> GetNext() { return GetNextWithIgnore(0); }
-
-    // Gets the next histogram from persistent memory, ignoring one particular
-    // reference in the process. Pass |ignore| of zero (0) to ignore nothing.
-    std::unique_ptr<HistogramBase> GetNextWithIgnore(Reference ignore);
-
-   private:
-    // Weak-pointer to histogram allocator being iterated over.
-    PersistentHistogramAllocator* allocator_;
-
-    // The iterator used for stepping through objects in persistent memory.
-    // It is lock-free and thread-safe which is why this class is also such.
-    PersistentMemoryAllocator::Iterator memory_iter_;
-
-    DISALLOW_COPY_AND_ASSIGN(Iterator);
-  };
-
-  // A PersistentHistogramAllocator is constructed from a PersistentMemory-
-  // Allocator object of which it takes ownership.
-  explicit PersistentHistogramAllocator(
-      std::unique_ptr<PersistentMemoryAllocator> memory);
-  virtual ~PersistentHistogramAllocator();
-
-  // Direct access to underlying memory allocator. If the segment is shared
-  // across threads or processes, reading data through these values does
-  // not guarantee consistency. Use with care. Do not write.
-  PersistentMemoryAllocator* memory_allocator() {
-    return memory_allocator_.get();
-  }
-
-  // Implement the "metadata" API of a PersistentMemoryAllocator, forwarding
-  // those requests to the real one.
-  uint64_t Id() const { return memory_allocator_->Id(); }
-  const char* Name() const { return memory_allocator_->Name(); }
-  const void* data() const { return memory_allocator_->data(); }
-  size_t length() const { return memory_allocator_->length(); }
-  size_t size() const { return memory_allocator_->size(); }
-  size_t used() const { return memory_allocator_->used(); }
-
-  // Recreate a Histogram from data held in persistent memory. Though this
-  // object will be local to the current process, the sample data will be
-  // shared with all other threads referencing it. This method takes a |ref|
-  // to where the top-level histogram data may be found in this allocator.
-  // This method will return null if any problem is detected with the data.
-  std::unique_ptr<HistogramBase> GetHistogram(Reference ref);
-
-  // Allocate a new persistent histogram. The returned histogram will not
-  // be able to be located by other allocators until it is "finalized".
-  std::unique_ptr<HistogramBase> AllocateHistogram(
-      HistogramType histogram_type,
-      const std::string& name,
-      int minimum,
-      int maximum,
-      const BucketRanges* bucket_ranges,
-      int32_t flags,
-      Reference* ref_ptr);
-
-  // Finalize the creation of the histogram, making it available to other
-  // processes if |registered| (as in: added to the StatisticsRecorder) is
-  // True, forgetting it otherwise.
-  void FinalizeHistogram(Reference ref, bool registered);
-
-  // Merges the data in a persistent histogram with one held globally by the
-  // StatisticsRecorder, updating the "logged" samples within the passed
-  // object so that repeated merges are allowed. Don't call this on a "global"
-  // allocator because histograms created there will already be in the SR.
-  void MergeHistogramDeltaToStatisticsRecorder(HistogramBase* histogram);
-
-  // As above but merge the "final" delta. No update of "logged" samples is
-  // done which means it can operate on read-only objects. It's essential,
-  // however, not to call this more than once or those final samples will
-  // get recorded again.
-  void MergeHistogramFinalDeltaToStatisticsRecorder(
-      const HistogramBase* histogram);
-
-  // Returns the object that manages the persistent-sample-map records for a
-  // given |id|. Only one |user| of this data is allowed at a time. This does
-  // an automatic Acquire() on the records. The user must call Release() on
-  // the returned object when it is finished with it. Ownership stays with
-  // this allocator.
-  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
-                                                  const void* user);
-
-  // Create internal histograms for tracking memory use and allocation sizes
-  // for allocator of |name| (which can simply be the result of Name()). This
-  // is done separately from construction for situations such as when the
-  // histograms will be backed by memory provided by this very allocator.
-  //
-  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
-  // with the following histograms:
-  //    UMA.PersistentAllocator.name.Allocs
-  //    UMA.PersistentAllocator.name.UsedPct
-  void CreateTrackingHistograms(StringPiece name);
-  void UpdateTrackingHistograms();
-
-  // Clears the internal |last_created_| reference so testing can validate
-  // operation without that optimization.
-  void ClearLastCreatedReferenceForTesting();
-
- protected:
-  // The structure used to hold histogram data in persistent memory. It is
-  // defined and used entirely within the .cc file.
-  struct PersistentHistogramData;
-
-  // Gets the reference of the last histogram created, used to avoid
-  // trying to import what was just created.
-  PersistentHistogramAllocator::Reference last_created() {
-    return subtle::NoBarrier_Load(&last_created_);
-  }
-
-  // Gets the next histogram in persistent data based on iterator while
-  // ignoring a particular reference if it is found.
-  std::unique_ptr<HistogramBase> GetNextHistogramWithIgnore(Iterator* iter,
-                                                            Reference ignore);
-
- private:
-  // Create a histogram based on saved (persistent) information about it.
-  std::unique_ptr<HistogramBase> CreateHistogram(
-      PersistentHistogramData* histogram_data_ptr);
-
-  // Gets or creates an object in the global StatisticsRecorder matching
-  // the |histogram| passed. Null is returned if one was not found and
-  // one could not be created.
-  HistogramBase* GetOrCreateStatisticsRecorderHistogram(
-      const HistogramBase* histogram);
-
-  // The memory allocator that provides the actual histogram storage.
-  std::unique_ptr<PersistentMemoryAllocator> memory_allocator_;
-
-  // The data-manager used to improve performance of sparse histograms.
-  PersistentSparseHistogramDataManager sparse_histogram_data_manager_;
-
-  // A reference to the last-created histogram in the allocator, used to avoid
-  // trying to import what was just created.
-  // TODO(bcwhite): Change this to std::atomic<PMA::Reference> when available.
-  subtle::Atomic32 last_created_ = 0;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocator);
-};
-
-
-// A special case of the PersistentHistogramAllocator that operates on a
-// global scale, collecting histograms created through standard macros and
-// the FactoryGet() method.
-class BASE_EXPORT GlobalHistogramAllocator
-    : public PersistentHistogramAllocator {
- public:
-  ~GlobalHistogramAllocator() override;
-
-  // Create a global allocator using the passed-in memory |base|, |size|, and
-  // other parameters. Ownership of the memory segment remains with the caller.
-  static void CreateWithPersistentMemory(void* base,
-                                         size_t size,
-                                         size_t page_size,
-                                         uint64_t id,
-                                         StringPiece name);
-
-  // Create a global allocator using an internal block of memory of the
-  // specified |size| taken from the heap.
-  static void CreateWithLocalMemory(size_t size, uint64_t id, StringPiece name);
-
-#if !defined(OS_NACL)
-  // Create a global allocator by memory-mapping a |file|. If the file does
-  // not exist, it will be created with the specified |size|. If the file does
-  // exist, the allocator will use and add to its contents, ignoring the passed
-  // size in favor of the existing size. Returns whether the global allocator
-  // was set.
-  static bool CreateWithFile(const FilePath& file_path,
-                             size_t size,
-                             uint64_t id,
-                             StringPiece name);
-
-  // Creates a new file at |active_path|. If it already exists, it will first be
-  // moved to |base_path|. In all cases, any old file at |base_path| will be
-  // removed. If |spare_path| is non-empty and exists, that will be renamed and
-  // used as the active file. Otherwise, the file will be created using the
-  // given size, id, and name. Returns whether the global allocator was set.
-  static bool CreateWithActiveFile(const FilePath& base_path,
-                                   const FilePath& active_path,
-                                   const FilePath& spare_path,
-                                   size_t size,
-                                   uint64_t id,
-                                   StringPiece name);
-
-  // Uses ConstructFilePaths() to build the base, active, and spare file
-  // names, which are then used for CreateWithActiveFile(). |name| is used
-  // both as the internal name for the allocator and as the name of the file
-  // inside |dir|.
-  static bool CreateWithActiveFileInDir(const FilePath& dir,
-                                        size_t size,
-                                        uint64_t id,
-                                        StringPiece name);
-
-  // Constructs a filename using a name.
-  static FilePath ConstructFilePath(const FilePath& dir, StringPiece name);
-
-  // Like above but with timestamp and pid for use in upload directories.
-  static FilePath ConstructFilePathForUploadDir(const FilePath& dir,
-                                                StringPiece name,
-                                                base::Time stamp,
-                                                ProcessId pid);
-
-  // Parses a filename to extract name, timestamp, and pid.
-  static bool ParseFilePath(const FilePath& path,
-                            std::string* out_name,
-                            Time* out_stamp,
-                            ProcessId* out_pid);
-
-  // Constructs a set of names in |dir| based on |name| that can be used for a
-  // base + active persistent memory mapped location for CreateWithActiveFile().
-  // The spare path is a file that can be pre-created and moved to be active
-  // without any startup penalty that comes from constructing the file. |name|
-  // will be used as the basename of the file inside |dir|. |out_base_path|,
-  // |out_active_path|, or |out_spare_path| may be null if not needed.
-  static void ConstructFilePaths(const FilePath& dir,
-                                 StringPiece name,
-                                 FilePath* out_base_path,
-                                 FilePath* out_active_path,
-                                 FilePath* out_spare_path);
-
-  // As above but puts the base files in a different "upload" directory. This
-  // is useful when moving all completed files into a single directory for easy
-  // upload management.
-  static void ConstructFilePathsForUploadDir(const FilePath& active_dir,
-                                             const FilePath& upload_dir,
-                                             const std::string& name,
-                                             FilePath* out_upload_path,
-                                             FilePath* out_active_path,
-                                             FilePath* out_spare_path);
-
-  // Create a "spare" file that can later be made the "active" file. This
-  // should be done on a background thread if possible.
-  static bool CreateSpareFile(const FilePath& spare_path, size_t size);
-
-  // Same as above but uses standard names. |name| is the name of the allocator
-  // and is also used to create the correct filename.
-  static bool CreateSpareFileInDir(const FilePath& dir_path,
-                                   size_t size,
-                                   StringPiece name);
-#endif
-
-  // Create a global allocator using a block of shared memory accessed
-  // through the given |handle| and |size|. The allocator takes ownership
-  // of the handle and closes it upon destruction, though the memory will
-  // continue to live if other processes have access to it.
-  static void CreateWithSharedMemoryHandle(const SharedMemoryHandle& handle,
-                                           size_t size);
-
-  // Sets a GlobalHistogramAllocator for globally storing histograms in
-  // a space that can be persisted or shared between processes. There is only
-  // ever one allocator for all such histograms created by a single process.
-  // This takes ownership of the object and should be called as soon as
-  // possible during startup to capture as many histograms as possible and
-  // while operating single-threaded so there are no race-conditions.
-  static void Set(std::unique_ptr<GlobalHistogramAllocator> allocator);
-
-  // Gets a pointer to the global histogram allocator. Returns null if none
-  // exists.
-  static GlobalHistogramAllocator* Get();
-
-  // This access to the persistent allocator is only for testing; it extracts
-  // the current allocator completely. This allows easy creation of histograms
-  // within persistent memory segments which can then be extracted and used in
-  // other ways.
-  static std::unique_ptr<GlobalHistogramAllocator> ReleaseForTesting();
-
-  // Stores a pathname to which the contents of this allocator should be saved
-  // in order to persist the data for a later use.
-  void SetPersistentLocation(const FilePath& location);
-
-  // Retrieves a previously set pathname to which the contents of this allocator
-  // are to be saved.
-  const FilePath& GetPersistentLocation() const;
-
-  // Writes the internal data to a previously set location. This is generally
-  // called when a process is exiting from a section of code that may not know
-  // the filesystem. The data is written in an atomic manner. The return value
-  // indicates success.
-  bool WriteToPersistentLocation();
-
-  // If there is a global metrics file being updated on disk, mark it to be
-  // deleted when the process exits.
-  void DeletePersistentLocation();
-
- private:
-  friend class StatisticsRecorder;
-
-  // Creates a new global histogram allocator.
-  explicit GlobalHistogramAllocator(
-      std::unique_ptr<PersistentMemoryAllocator> memory);
-
-  // Import new histograms from the global histogram allocator. It's possible
-  // for other processes to create histograms in the active memory segment;
-  // this adds those to the internal list of known histograms to avoid creating
-  // duplicates that would have to be merged during reporting. Every call to
-  // this method resumes from the last entry it saw; it costs nothing if
-  // nothing new has been added.
-  void ImportHistogramsToStatisticsRecorder();
-
-  // Builds a FilePath for a metrics file.
-  static FilePath MakeMetricsFilePath(const FilePath& dir, StringPiece name);
-
-  // Import always continues from where it left off, making use of a single
-  // iterator to continue the work.
-  Iterator import_iterator_;
-
-  // The location to which the data should be persisted.
-  FilePath persistent_location_;
-
-  DISALLOW_COPY_AND_ASSIGN(GlobalHistogramAllocator);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
diff --git a/base/metrics/persistent_histogram_storage.cc b/base/metrics/persistent_histogram_storage.cc
deleted file mode 100644
index 8f527b0..0000000
--- a/base/metrics/persistent_histogram_storage.cc
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/persistent_histogram_storage.h"
-
-#include "base/files/file_util.h"
-#include "base/files/important_file_writer.h"
-#include "base/logging.h"
-#include "base/metrics/persistent_histogram_allocator.h"
-#include "base/metrics/persistent_memory_allocator.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/time/time.h"
-#include "build_config.h"
-
-namespace {
-
-constexpr size_t kAllocSize = 1 << 20;  // 1 MiB
-
-}  // namespace
-
-namespace base {
-
-PersistentHistogramStorage::PersistentHistogramStorage(
-    StringPiece allocator_name,
-    StorageDirManagement storage_dir_management)
-    : storage_dir_management_(storage_dir_management) {
-  DCHECK(!allocator_name.empty());
-  DCHECK(IsStringASCII(allocator_name));
-
-  GlobalHistogramAllocator::CreateWithLocalMemory(kAllocSize,
-                                                  0,  // No identifier.
-                                                  allocator_name);
-  GlobalHistogramAllocator::Get()->CreateTrackingHistograms(allocator_name);
-}
-
-PersistentHistogramStorage::~PersistentHistogramStorage() {
-  PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
-  allocator->UpdateTrackingHistograms();
-
-  // TODO(chengx): Investigate making the early return depend on whether there
-  // are metrics to report at this point.
-  if (disabled_)
-    return;
-
-  // Stop if the storage base directory has not been properly set.
-  if (storage_base_dir_.empty()) {
-    LOG(ERROR)
-        << "Could not write \"" << allocator->Name()
-        << "\" persistent histograms to file as the storage base directory "
-           "is not properly set.";
-    return;
-  }
-
-  FilePath storage_dir = storage_base_dir_.AppendASCII(allocator->Name());
-
-  switch (storage_dir_management_) {
-    case StorageDirManagement::kCreate:
-      if (!CreateDirectory(storage_dir)) {
-        LOG(ERROR)
-            << "Could not write \"" << allocator->Name()
-            << "\" persistent histograms to file as the storage directory "
-               "cannot be created.";
-        return;
-      }
-      break;
-    case StorageDirManagement::kUseExisting:
-      if (!DirectoryExists(storage_dir)) {
-        // When the consumer of this class decides to use an existing storage
-        // directory, it should ensure the directory's existence if it's
-        // essential.
-        LOG(ERROR)
-            << "Could not write \"" << allocator->Name()
-            << "\" persistent histograms to file as the storage directory "
-               "does not exist.";
-        return;
-      }
-      break;
-  }
-
-  // Save data using the current time as the filename. The actual filename
-  // doesn't matter (so long as it ends with the correct extension) but this
-  // works as well as anything.
-  Time::Exploded exploded;
-  Time::Now().LocalExplode(&exploded);
-  const FilePath file_path =
-      storage_dir
-          .AppendASCII(StringPrintf("%04d%02d%02d%02d%02d%02d", exploded.year,
-                                    exploded.month, exploded.day_of_month,
-                                    exploded.hour, exploded.minute,
-                                    exploded.second))
-          .AddExtension(PersistentMemoryAllocator::kFileExtension);
-
-  StringPiece contents(static_cast<const char*>(allocator->data()),
-                       allocator->used());
-  if (!ImportantFileWriter::WriteFileAtomically(file_path, contents)) {
-    LOG(ERROR) << "Persistent histograms fail to write to file: "
-               << file_path.value();
-  }
-}
-
-}  // namespace base
diff --git a/base/metrics/persistent_histogram_storage.h b/base/metrics/persistent_histogram_storage.h
deleted file mode 100644
index 397236d..0000000
--- a/base/metrics/persistent_histogram_storage.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_
-#define BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_
-
-#include "base/base_export.h"
-#include "base/files/file_path.h"
-#include "base/macros.h"
-#include "base/strings/string_piece.h"
-
-namespace base {
-
-// This class creates a fixed-size persistent memory segment in which
-// histograms can be stored. When a PersistentHistogramStorage is destructed,
-// histograms recorded during its lifetime are persisted in the directory
-// |storage_base_dir_|/|allocator_name| (see the ctor for allocator_name).
-// Histograms are not persisted if the storage directory does not exist on
-// destruction. PersistentHistogramStorage should be instantiated as early as
-// possible in the process lifetime and should never be instantiated again.
-// Persisted histograms will eventually be reported by Chrome.
-class BASE_EXPORT PersistentHistogramStorage {
- public:
-  enum class StorageDirManagement { kCreate, kUseExisting };
-
-  // Creates a process-wide storage location for histograms that will be written
-  // to a file within a directory provided by |set_storage_base_dir()| on
-  // destruction.
-  // The |allocator_name| is used both as an internal name for the allocator
-  // and as the leaf directory name for the file to which the histograms are
-  // persisted. The string must be ASCII.
-  // |storage_dir_management| specifies if this instance reuses an existing
-  // storage directory, or is responsible for creating one.
-  PersistentHistogramStorage(StringPiece allocator_name,
-                             StorageDirManagement storage_dir_management);
-
-  ~PersistentHistogramStorage();
-
-  // The storage directory isn't always known during initial construction so
-  // it's set separately. The last one wins if there are multiple calls to this
-  // method.
-  void set_storage_base_dir(const FilePath& storage_base_dir) {
-    storage_base_dir_ = storage_base_dir;
-  }
-
-  // Disables histogram storage.
-  void Disable() { disabled_ = true; }
-
- private:
-  // Metrics files are written into directory
-  // |storage_base_dir_|/|allocator_name| (see the ctor for allocator_name).
-  FilePath storage_base_dir_;
-
-  // The setting of the storage directory management.
-  const StorageDirManagement storage_dir_management_;
-
-  // A flag indicating whether histogram storage is disabled. It starts as
-  // false but can be set to true by a caller that decides to throw away its
-  // histogram data.
-  bool disabled_ = false;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramStorage);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_
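A hedged usage sketch for this class (allocator name and directory are
illustrative):

    {
      base::PersistentHistogramStorage storage(
          "SetupMetrics",
          base::PersistentHistogramStorage::StorageDirManagement::kCreate);
      storage.set_storage_base_dir(
          base::FilePath(FILE_PATH_LITERAL("/tmp/metrics")));
      // Histograms recorded here land in the process-wide allocator.
    }  // Destructor writes <base>/SetupMetrics/<yyyymmddhhmmss>.pma atomically.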
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
deleted file mode 100644
index 7d9c03d..0000000
--- a/base/metrics/persistent_memory_allocator.cc
+++ /dev/null
@@ -1,1204 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/persistent_memory_allocator.h"
-
-#include <assert.h>
-#include <algorithm>
-
-#if defined(OS_WIN)
-#include <windows.h>
-#include "winbase.h"
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
-#include <sys/mman.h>
-#endif
-
-#include "base/files/memory_mapped_file.h"
-#include "base/logging.h"
-#include "base/memory/shared_memory.h"
-#include "base/metrics/histogram_functions.h"
-#include "base/metrics/sparse_histogram.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/sys_info.h"
-#include "base/threading/thread_restrictions.h"
-#include "build_config.h"
-
-namespace {
-
-// Limit of memory segment size. It has to fit in an unsigned 32-bit number
-// and should be a power of 2 in order to accommodate almost any page size.
-const uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB
-
-// A constant (random) value placed in the shared metadata to identify
-// an already initialized memory segment.
-const uint32_t kGlobalCookie = 0x408305DC;
-
-// The current version of the metadata. If updates are made that change
-// the metadata, the version number can be queried to operate in a backward-
-// compatible manner until the memory segment is completely re-initialized.
-const uint32_t kGlobalVersion = 2;
-
-// Constant values placed in the block headers to indicate its state.
-const uint32_t kBlockCookieFree = 0;
-const uint32_t kBlockCookieQueue = 1;
-const uint32_t kBlockCookieWasted = static_cast<uint32_t>(-1);
-const uint32_t kBlockCookieAllocated = 0xC8799269;
-
-// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
-// types rather than combined bitfield.
-
-// Flags stored in the flags_ field of the SharedMetadata structure below.
-enum : int {
-  kFlagCorrupt = 1 << 0,
-  kFlagFull    = 1 << 1
-};
-
-// Errors that are logged in "errors" histogram.
-enum AllocatorError : int {
-  kMemoryIsCorrupt = 1,
-};
-
-bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
-  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
-  return (loaded_flags & flag) != 0;
-}
-
-void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
-  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
-  for (;;) {
-    uint32_t new_flags = (loaded_flags & ~flag) | flag;
-    // In the failure case, the actual "flags" value is stored in
-    // loaded_flags. These accesses are "relaxed" because they are completely
-    // independent of all other values.
-    if (flags->compare_exchange_weak(loaded_flags, new_flags,
-                                     std::memory_order_relaxed,
-                                     std::memory_order_relaxed)) {
-      break;
-    }
-  }
-}
-
-}  // namespace
-
-namespace base {
-
-// All allocations and data-structures must be aligned to this byte boundary.
-// Alignment as large as the physical bus between CPU and RAM is _required_
-// for some architectures, is simply more efficient on other CPUs, and is
-// generally a Good Idea(tm) for all platforms as it reduces/eliminates the
-// chance that a type will span cache lines. Alignment mustn't be less
-// than 8 to ensure proper alignment for all types. The rest is a balance
-// between reducing spans across multiple cache lines and wasted space spent
-// padding out allocations. An alignment of 16 would ensure that the block
-// header structure always sits in a single cache line. An average of about
-// 1/2 this value will be wasted with every allocation.
-const uint32_t PersistentMemoryAllocator::kAllocAlignment = 8;
-
-// The block-header is placed at the top of every allocation within the
-// segment to describe the data that follows it.
-struct PersistentMemoryAllocator::BlockHeader {
-  uint32_t size;       // Number of bytes in this block, including header.
-  uint32_t cookie;     // Constant value indicating completed allocation.
-  std::atomic<uint32_t> type_id;  // Arbitrary number indicating data type.
-  std::atomic<uint32_t> next;     // Pointer to the next block when iterating.
-};
-
-// The shared metadata exists once at the top of the memory segment to
-// describe the state of the allocator to all processes. The size of this
-// structure must be a multiple of 64-bits to ensure compatibility between
-// architectures.
-struct PersistentMemoryAllocator::SharedMetadata {
-  uint32_t cookie;     // Some value that indicates complete initialization.
-  uint32_t size;       // Total size of memory segment.
-  uint32_t page_size;  // Paging size within memory segment.
-  uint32_t version;    // Version code so upgrades don't break.
-  uint64_t id;         // Arbitrary ID number given by creator.
-  uint32_t name;       // Reference to stored name string.
-  uint32_t padding1;   // Pad-out read-only data to 64-bit alignment.
-
-  // Above is read-only after first construction. Below may be changed and
-  // so must be marked "volatile" to provide correct inter-process behavior.
-
-  // State of the memory, plus some padding to keep alignment.
-  volatile std::atomic<uint8_t> memory_state;  // MemoryState enum values.
-  uint8_t padding2[3];
-
-  // Bitfield of information flags. Access to this should be done through
-  // the CheckFlag() and SetFlag() methods defined above.
-  volatile std::atomic<uint32_t> flags;
-
-  // Offset/reference to first free space in segment.
-  volatile std::atomic<uint32_t> freeptr;
-
-  // The "iterable" queue is an M&S Queue as described here, append-only:
-  // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
-  // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
-  volatile std::atomic<uint32_t> tailptr;  // Last block of iteration queue.
-  volatile BlockHeader queue;   // Empty block for linked-list head/tail.
-};
-
-// The "queue" block header is used to detect "last node" so that zero/null
-// can be used to indicate that it hasn't been added at all. It is part of
-// the SharedMetadata structure which itself is always located at offset zero.
-const PersistentMemoryAllocator::Reference
-    PersistentMemoryAllocator::kReferenceQueue =
-        offsetof(SharedMetadata, queue);
-
-const base::FilePath::CharType PersistentMemoryAllocator::kFileExtension[] =
-    FILE_PATH_LITERAL(".pma");
-
-
-PersistentMemoryAllocator::Iterator::Iterator(
-    const PersistentMemoryAllocator* allocator)
-    : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
-
-PersistentMemoryAllocator::Iterator::Iterator(
-    const PersistentMemoryAllocator* allocator,
-    Reference starting_after)
-    : allocator_(allocator), last_record_(0), record_count_(0) {
-  Reset(starting_after);
-}
-
-void PersistentMemoryAllocator::Iterator::Reset() {
-  last_record_.store(kReferenceQueue, std::memory_order_relaxed);
-  record_count_.store(0, std::memory_order_relaxed);
-}
-
-void PersistentMemoryAllocator::Iterator::Reset(Reference starting_after) {
-  if (starting_after == 0) {
-    Reset();
-    return;
-  }
-
-  last_record_.store(starting_after, std::memory_order_relaxed);
-  record_count_.store(0, std::memory_order_relaxed);
-
-  // Ensure that the starting point is a valid, iterable block (meaning it can
-  // be read and has a non-zero "next" pointer).
-  const volatile BlockHeader* block =
-      allocator_->GetBlock(starting_after, 0, 0, false, false);
-  if (!block || block->next.load(std::memory_order_relaxed) == 0) {
-    NOTREACHED();
-    last_record_.store(kReferenceQueue, std::memory_order_release);
-  }
-}
-
-PersistentMemoryAllocator::Reference
-PersistentMemoryAllocator::Iterator::GetLast() {
-  Reference last = last_record_.load(std::memory_order_relaxed);
-  if (last == kReferenceQueue)
-    return kReferenceNull;
-  return last;
-}
-
-PersistentMemoryAllocator::Reference
-PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
-  // Make a copy of the existing count of found-records, acquiring all changes
-  // made to the allocator, notably "freeptr" (see comment in loop for why
-  // the load of that value cannot be moved above here) that occurred during
-  // any previous runs of this method, including those by parallel threads
-  // that interrupted it. It pairs with the Release at the end of this method.
-  //
-  // Otherwise, if the compiler were to arrange the two loads such that
-  // "count" was fetched _after_ "freeptr" then it would be possible for
-  // this thread to be interrupted between them and other threads perform
-  // multiple allocations, make-iterables, and iterations (with the included
-  // increment of |record_count_|) culminating in the check at the bottom
-  // mistakenly determining that a loop exists. Isn't this stuff fun?
-  uint32_t count = record_count_.load(std::memory_order_acquire);
-
-  Reference last = last_record_.load(std::memory_order_acquire);
-  Reference next;
-  while (true) {
-    const volatile BlockHeader* block =
-        allocator_->GetBlock(last, 0, 0, true, false);
-    if (!block)  // Invalid iterator state.
-      return kReferenceNull;
-
-    // The compiler and CPU can freely reorder all memory accesses on which
-    // there are no dependencies. It could, for example, move the load of
-    // "freeptr" to above this point because there are no explicit dependencies
-    // between it and "next". If it did, however, then another block could
-    // be queued after that but before the following load meaning there is
-    // one more queued block than the future "detect loop by having more
-    // blocks that could fit before freeptr" will allow.
-    //
-    // By "acquiring" the "next" value here, it's synchronized to the enqueue
-    // of the node which in turn is synchronized to the allocation (which sets
-    // freeptr). Thus, the scenario above cannot happen.
-    next = block->next.load(std::memory_order_acquire);
-    if (next == kReferenceQueue)  // No next allocation in queue.
-      return kReferenceNull;
-    block = allocator_->GetBlock(next, 0, 0, false, false);
-    if (!block) {  // Memory is corrupt.
-      allocator_->SetCorrupt();
-      return kReferenceNull;
-    }
-
-    // Update the "last_record" pointer to be the reference being returned.
-    // If it fails then another thread has already iterated past it so loop
-    // again. Failing will also load the existing value into "last" so there
-    // is no need to do another such load when the while-loop restarts. A
-    // "strong" compare-exchange is used because failing unnecessarily would
-    // mean repeating some fairly costly validations above.
-    if (last_record_.compare_exchange_strong(
-            last, next, std::memory_order_acq_rel, std::memory_order_acquire)) {
-      *type_return = block->type_id.load(std::memory_order_relaxed);
-      break;
-    }
-  }
-
-  // Memory corruption could cause a loop in the list. Such a loop must be
-  // detected so as not to cause an infinite loop in the caller. This is done
-  // by simply making sure it doesn't iterate more times than the absolute
-  // maximum number of allocations that could have been made. Callers are
-  // likely to loop multiple times before it is detected but at least it stops.
-  const uint32_t freeptr = std::min(
-      allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
-      allocator_->mem_size_);
-  const uint32_t max_records =
-      freeptr / (sizeof(BlockHeader) + kAllocAlignment);
-  if (count > max_records) {
-    allocator_->SetCorrupt();
-    return kReferenceNull;
-  }
-
-  // Increment the count and release the changes made above. It pairs with
-  // the Acquire at the top of this method. Note that this operation is not
-  // strictly synchronized with fetching of the object to return, which would
-  // have to be done inside the loop and is somewhat complicated to achieve.
-  // It does not matter if it falls behind temporarily so long as it never
-  // gets ahead.
-  record_count_.fetch_add(1, std::memory_order_release);
-  return next;
-}
-
-PersistentMemoryAllocator::Reference
-PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
-  Reference ref;
-  uint32_t type_found;
-  while ((ref = GetNext(&type_found)) != 0) {
-    if (type_found == type_match)
-      return ref;
-  }
-  return kReferenceNull;
-}
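
As a usage sketch of the iterator machinery above (the consuming function and the handling of the payload are hypothetical), a reader simply walks the queue until GetNext() returns a null reference:

```cpp
#include <cstdint>

#include "base/metrics/persistent_memory_allocator.h"

// A minimal sketch, assuming |allocator| was attached to a segment elsewhere.
void DumpIterableRecords(const base::PersistentMemoryAllocator* allocator) {
  base::PersistentMemoryAllocator::Iterator iter(allocator);
  uint32_t type_id;
  for (base::PersistentMemoryAllocator::Reference ref = iter.GetNext(&type_id);
       ref != base::PersistentMemoryAllocator::kReferenceNull;
       ref = iter.GetNext(&type_id)) {
    // GetAsArray() re-validates the block against the type id and size.
    const char* data = allocator->GetAsArray<char>(
        ref, type_id, base::PersistentMemoryAllocator::kSizeAny);
    if (data) {
      // ... consume up to allocator->GetAllocSize(ref) bytes of |data| ...
    }
  }
}
```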
-
-
-// static
-bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
-                                                   size_t size,
-                                                   size_t page_size,
-                                                   bool readonly) {
-  return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
-          (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
-          (size % kAllocAlignment == 0 || readonly) &&
-          (page_size == 0 || size % page_size == 0 || readonly));
-}
-
-PersistentMemoryAllocator::PersistentMemoryAllocator(void* base,
-                                                     size_t size,
-                                                     size_t page_size,
-                                                     uint64_t id,
-                                                     base::StringPiece name,
-                                                     bool readonly)
-    : PersistentMemoryAllocator(Memory(base, MEM_EXTERNAL),
-                                size,
-                                page_size,
-                                id,
-                                name,
-                                readonly) {}
-
-PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
-                                                     size_t size,
-                                                     size_t page_size,
-                                                     uint64_t id,
-                                                     base::StringPiece name,
-                                                     bool readonly)
-    : mem_base_(static_cast<char*>(memory.base)),
-      mem_type_(memory.type),
-      mem_size_(static_cast<uint32_t>(size)),
-      mem_page_(static_cast<uint32_t>((page_size ? page_size : size))),
-#if defined(OS_NACL)
-      vm_page_size_(4096U),  // SysInfo is not built for NACL.
-#else
-      vm_page_size_(SysInfo::VMAllocationGranularity()),
-#endif
-      readonly_(readonly),
-      corrupt_(0),
-      allocs_histogram_(nullptr),
-      used_histogram_(nullptr),
-      errors_histogram_(nullptr) {
-  // These asserts ensure that the structures are 32/64-bit agnostic and meet
-  // all the requirements of use within the allocator. They access private
-  // definitions and so cannot be moved to the global scope.
-  static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
-                "struct is not portable across different natural word widths");
-  static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
-                "struct is not portable across different natural word widths");
-
-  static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
-                "BlockHeader is not a multiple of kAllocAlignment");
-  static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
-                "SharedMetadata is not a multiple of kAllocAlignment");
-  static_assert(kReferenceQueue % kAllocAlignment == 0,
-                "\"queue\" is not aligned properly; must be at end of struct");
-
-  // Ensure that memory segment is of acceptable size.
-  CHECK(IsMemoryAcceptable(memory.base, size, page_size, readonly));
-
-  // These atomics operate inter-process and so must be lock-free. The local
-  // casts are to make sure it can be evaluated at compile time to a constant.
-  CHECK(((SharedMetadata*)nullptr)->freeptr.is_lock_free());
-  CHECK(((SharedMetadata*)nullptr)->flags.is_lock_free());
-  CHECK(((BlockHeader*)nullptr)->next.is_lock_free());
-  CHECK(corrupt_.is_lock_free());
-
-  if (shared_meta()->cookie != kGlobalCookie) {
-    if (readonly) {
-      SetCorrupt();
-      return;
-    }
-
-    // This block is only executed when a completely new memory segment is
-    // being initialized. It's unshared and single-threaded...
-    volatile BlockHeader* const first_block =
-        reinterpret_cast<volatile BlockHeader*>(mem_base_ +
-                                                sizeof(SharedMetadata));
-    if (shared_meta()->cookie != 0 ||
-        shared_meta()->size != 0 ||
-        shared_meta()->version != 0 ||
-        shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
-        shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
-        shared_meta()->id != 0 ||
-        shared_meta()->name != 0 ||
-        shared_meta()->tailptr != 0 ||
-        shared_meta()->queue.cookie != 0 ||
-        shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
-        first_block->size != 0 ||
-        first_block->cookie != 0 ||
-        first_block->type_id.load(std::memory_order_relaxed) != 0 ||
-        first_block->next != 0) {
-      // ...or something malicious has been playing with the metadata.
-      SetCorrupt();
-    }
-
-    // This is still safe to do even if corruption has been detected.
-    shared_meta()->cookie = kGlobalCookie;
-    shared_meta()->size = mem_size_;
-    shared_meta()->page_size = mem_page_;
-    shared_meta()->version = kGlobalVersion;
-    shared_meta()->id = id;
-    shared_meta()->freeptr.store(sizeof(SharedMetadata),
-                                 std::memory_order_release);
-
-    // Set up the queue of iterable allocations.
-    shared_meta()->queue.size = sizeof(BlockHeader);
-    shared_meta()->queue.cookie = kBlockCookieQueue;
-    shared_meta()->queue.next.store(kReferenceQueue, std::memory_order_release);
-    shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
-
-    // Allocate space for the name so other processes can learn it.
-    if (!name.empty()) {
-      const size_t name_length = name.length() + 1;
-      shared_meta()->name = Allocate(name_length, 0);
-      char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
-      if (name_cstr)
-        memcpy(name_cstr, name.data(), name.length());
-    }
-
-    shared_meta()->memory_state.store(MEMORY_INITIALIZED,
-                                      std::memory_order_release);
-  } else {
-    if (shared_meta()->size == 0 || shared_meta()->version != kGlobalVersion ||
-        shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
-        shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
-        shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
-      SetCorrupt();
-    }
-    if (!readonly) {
-      // The allocator is attaching to a previously initialized segment of
-      // memory. If the initialization parameters differ, make the best of it
-      // by reducing the local construction parameters to match those of
-      // the actual memory area. This ensures that the local object never
-      // tries to write outside of the original bounds.
-      // Because the fields are const to ensure that no code other than the
-      // constructor makes changes to them as well as to give optimization
-      // hints to the compiler, it's necessary to const-cast them for changes
-      // here.
-      if (shared_meta()->size < mem_size_)
-        *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
-      if (shared_meta()->page_size < mem_page_)
-        *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
-
-      // Ensure that settings are still valid after the above adjustments.
-      if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly))
-        SetCorrupt();
-    }
-  }
-}
-
-PersistentMemoryAllocator::~PersistentMemoryAllocator() {
-  // It's strictly forbidden to do any memory access here in case there is
-  // some issue with the underlying memory segment. The "Local" allocator
-  // makes use of this to allow deletion of the segment on the heap from
-  // within its destructor.
-}
-
-uint64_t PersistentMemoryAllocator::Id() const {
-  return shared_meta()->id;
-}
-
-const char* PersistentMemoryAllocator::Name() const {
-  Reference name_ref = shared_meta()->name;
-  const char* name_cstr =
-      GetAsArray<char>(name_ref, 0, PersistentMemoryAllocator::kSizeAny);
-  if (!name_cstr)
-    return "";
-
-  size_t name_length = GetAllocSize(name_ref);
-  if (name_length == 0 || name_cstr[name_length - 1] != '\0') {
-    NOTREACHED();
-    SetCorrupt();
-    return "";
-  }
-
-  return name_cstr;
-}
-
-void PersistentMemoryAllocator::CreateTrackingHistograms(
-    base::StringPiece name) {
-  if (name.empty() || readonly_)
-    return;
-  std::string name_string = name.as_string();
-
-#if 0
-  // This histogram wasn't being used so has been disabled. It is left here
-  // in case development of a new use of the allocator could benefit from
-  // recording (temporarily and locally) the allocation sizes.
-  DCHECK(!allocs_histogram_);
-  allocs_histogram_ = Histogram::FactoryGet(
-      "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
-      HistogramBase::kUmaTargetedHistogramFlag);
-#endif
-
-  DCHECK(!used_histogram_);
-  used_histogram_ = LinearHistogram::FactoryGet(
-      "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
-      HistogramBase::kUmaTargetedHistogramFlag);
-
-  DCHECK(!errors_histogram_);
-  errors_histogram_ = SparseHistogram::FactoryGet(
-      "UMA.PersistentAllocator." + name_string + ".Errors",
-      HistogramBase::kUmaTargetedHistogramFlag);
-}
-
-void PersistentMemoryAllocator::Flush(bool sync) {
-  FlushPartial(used(), sync);
-}
-
-void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
-  shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
-  FlushPartial(sizeof(SharedMetadata), false);
-}
-
-uint8_t PersistentMemoryAllocator::GetMemoryState() const {
-  return shared_meta()->memory_state.load(std::memory_order_relaxed);
-}
-
-size_t PersistentMemoryAllocator::used() const {
-  return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
-                  mem_size_);
-}
-
-PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
-    const void* memory,
-    uint32_t type_id) const {
-  uintptr_t address = reinterpret_cast<uintptr_t>(memory);
-  if (address < reinterpret_cast<uintptr_t>(mem_base_))
-    return kReferenceNull;
-
-  uintptr_t offset = address - reinterpret_cast<uintptr_t>(mem_base_);
-  if (offset >= mem_size_ || offset < sizeof(BlockHeader))
-    return kReferenceNull;
-
-  Reference ref = static_cast<Reference>(offset) - sizeof(BlockHeader);
-  if (!GetBlockData(ref, type_id, kSizeAny))
-    return kReferenceNull;
-
-  return ref;
-}
-
-size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
-  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
-  if (!block)
-    return 0;
-  uint32_t size = block->size;
-  // Header was verified by GetBlock() but a malicious actor could change
-  // the value between there and here. Check it again.
-  if (size <= sizeof(BlockHeader) || ref + size > mem_size_) {
-    SetCorrupt();
-    return 0;
-  }
-  return size - sizeof(BlockHeader);
-}
-
-uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
-  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
-  if (!block)
-    return 0;
-  return block->type_id.load(std::memory_order_relaxed);
-}
-
-bool PersistentMemoryAllocator::ChangeType(Reference ref,
-                                           uint32_t to_type_id,
-                                           uint32_t from_type_id,
-                                           bool clear) {
-  DCHECK(!readonly_);
-  volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
-  if (!block)
-    return false;
-
-  // "Strong" exchanges are used below because there is no loop that can retry
-  // in the wake of spurious failures possible with "weak" exchanges. It is,
-  // in aggregate, an "acquire-release" operation so no memory accesses can be
-  // reordered either before or after this method (since changes based on type
-  // could happen on either side).
-
-  if (clear) {
-    // If clearing the memory, first change it to the "transitioning" type so
-    // there can be no confusion by other threads. After the memory is cleared,
-    // it can be changed to its final type.
-    if (!block->type_id.compare_exchange_strong(
-            from_type_id, kTypeIdTransitioning, std::memory_order_acquire,
-            std::memory_order_acquire)) {
-      // Existing type wasn't what was expected: fail (with no changes)
-      return false;
-    }
-
-    // Clear the memory in an atomic manner. Using "release" stores force
-    // every write to be done after the ones before it. This is better than
-    // using memset because (a) it supports "volatile" and (b) it creates a
-    // reliable pattern upon which other threads may rely.
-    volatile std::atomic<int>* data =
-        reinterpret_cast<volatile std::atomic<int>*>(
-            reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader));
-    const uint32_t words = (block->size - sizeof(BlockHeader)) / sizeof(int);
-    DCHECK_EQ(0U, (block->size - sizeof(BlockHeader)) % sizeof(int));
-    for (uint32_t i = 0; i < words; ++i) {
-      data->store(0, std::memory_order_release);
-      ++data;
-    }
-
-    // If the destination type is "transitioning" then skip the final exchange.
-    if (to_type_id == kTypeIdTransitioning)
-      return true;
-
-    // Finish the change to the desired type.
-    from_type_id = kTypeIdTransitioning;  // Exchange needs modifiable original.
-    bool success = block->type_id.compare_exchange_strong(
-        from_type_id, to_type_id, std::memory_order_release,
-        std::memory_order_relaxed);
-    DCHECK(success);  // Should never fail.
-    return success;
-  }
-
-  // One step change to the new type. Will return false if the existing value
-  // doesn't match what is expected.
-  return block->type_id.compare_exchange_strong(from_type_id, to_type_id,
-                                                std::memory_order_acq_rel,
-                                                std::memory_order_acquire);
-}
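
To make the clear-and-retype path above concrete, here is a sketch with made-up type ids; if another thread has already retyped the block, the call fails without touching any bytes:

```cpp
#include <cstdint>

#include "base/metrics/persistent_memory_allocator.h"

constexpr uint32_t kTypeIdOld = 0x0DDF00D1;  // Hypothetical type ids.
constexpr uint32_t kTypeIdNew = 0x0DDF00D2;

bool RecycleBlock(base::PersistentMemoryAllocator* allocator,
                  base::PersistentMemoryAllocator::Reference ref) {
  // |clear|=true zeroes the payload with release stores while the block is
  // parked on the internal "transitioning" type id, then applies kTypeIdNew.
  return allocator->ChangeType(ref, kTypeIdNew, kTypeIdOld, /*clear=*/true);
}
```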
-
-PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
-    size_t req_size,
-    uint32_t type_id) {
-  Reference ref = AllocateImpl(req_size, type_id);
-  if (ref) {
-    // Success: Record this allocation in usage stats (if active).
-    if (allocs_histogram_)
-      allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size));
-  } else {
-    // Failure: Record an allocation of zero for tracking.
-    if (allocs_histogram_)
-      allocs_histogram_->Add(0);
-  }
-  return ref;
-}
-
-PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
-    size_t req_size,
-    uint32_t type_id) {
-  DCHECK(!readonly_);
-
-  // Validate req_size to ensure it won't overflow when used as 32-bit value.
-  if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
-    NOTREACHED();
-    return kReferenceNull;
-  }
-
-  // Round up the requested size, plus header, to the next allocation alignment.
-  uint32_t size = static_cast<uint32_t>(req_size + sizeof(BlockHeader));
-  size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
-  if (size <= sizeof(BlockHeader) || size > mem_page_) {
-    NOTREACHED();
-    return kReferenceNull;
-  }
-
-  // Get the current start of unallocated memory. Other threads may
-  // update this at any time and cause us to retry these operations.
-  // This value should be treated as "const" to avoid confusion through
-  // the code below but recognize that any failed compare-exchange operation
-  // involving it will cause it to be loaded with a more recent value. The
-  // code should either exit or restart the loop in that case.
-  /* const */ uint32_t freeptr =
-      shared_meta()->freeptr.load(std::memory_order_acquire);
-
-  // Allocation is lockless so we do all our calculation and then, if saving
-  // indicates a change has occurred since we started, scrap everything and
-  // start over.
-  for (;;) {
-    if (IsCorrupt())
-      return kReferenceNull;
-
-    if (freeptr + size > mem_size_) {
-      SetFlag(&shared_meta()->flags, kFlagFull);
-      return kReferenceNull;
-    }
-
-    // Get pointer to the "free" block. If something has been allocated since
-    // the load of freeptr above, it is still safe as nothing will be written
-    // to that location until after the compare-exchange below.
-    volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
-    if (!block) {
-      SetCorrupt();
-      return kReferenceNull;
-    }
-
-    // An allocation cannot cross page boundaries. If it would, create a
-    // "wasted" block and begin again at the top of the next page. This
-    // area could just be left empty but we fill in the block header just
-    // for completeness' sake.
-    const uint32_t page_free = mem_page_ - freeptr % mem_page_;
-    if (size > page_free) {
-      if (page_free <= sizeof(BlockHeader)) {
-        SetCorrupt();
-        return kReferenceNull;
-      }
-      const uint32_t new_freeptr = freeptr + page_free;
-      if (shared_meta()->freeptr.compare_exchange_strong(
-              freeptr, new_freeptr, std::memory_order_acq_rel,
-              std::memory_order_acquire)) {
-        block->size = page_free;
-        block->cookie = kBlockCookieWasted;
-      }
-      continue;
-    }
-
-    // Don't leave a slice at the end of a page too small for anything. This
-    // can result in an allocation up to two alignment-sizes greater than the
-    // minimum required by requested-size + header + alignment.
-    if (page_free - size < sizeof(BlockHeader) + kAllocAlignment)
-      size = page_free;
-
-    const uint32_t new_freeptr = freeptr + size;
-    if (new_freeptr > mem_size_) {
-      SetCorrupt();
-      return kReferenceNull;
-    }
-
-    // Save our work. Try again if another thread has completed an allocation
-    // while we were processing. A "weak" exchange would be permissible here
-    // because the code will just loop and try again but the above processing
-    // is significant so make the extra effort of a "strong" exchange.
-    if (!shared_meta()->freeptr.compare_exchange_strong(
-            freeptr, new_freeptr, std::memory_order_acq_rel,
-            std::memory_order_acquire)) {
-      continue;
-    }
-
-    // Given that all memory was zeroed before ever being given to an instance
-    // of this class and given that we only allocate in a monotonic fashion
-    // going forward, it must be that the newly allocated block is completely
-    // full of zeros. If we find anything in the block header that is NOT a
-    // zero then something must have previously run amuck through memory,
-    // writing beyond the allocated space and into unallocated space.
-    if (block->size != 0 ||
-        block->cookie != kBlockCookieFree ||
-        block->type_id.load(std::memory_order_relaxed) != 0 ||
-        block->next.load(std::memory_order_relaxed) != 0) {
-      SetCorrupt();
-      return kReferenceNull;
-    }
-
-    // Make sure the memory exists by writing to the first byte of every memory
-    // page it touches beyond the one containing the block header itself.
-    // As the underlying storage is often memory mapped from disk or shared
-    // space, sometimes things go wrong and those addresses don't actually
-    // exist, leading to a SIGBUS (or Windows equivalent) at some arbitrary
-    // location
-    // leading to a SIGBUS (or Windows equivalent) at some arbitrary location
-    // in the code. This should concentrate all those failures into this
-    // location for easy tracking and, eventually, proper handling.
-    volatile char* mem_end = reinterpret_cast<volatile char*>(block) + size;
-    volatile char* mem_begin = reinterpret_cast<volatile char*>(
-        (reinterpret_cast<uintptr_t>(block) + sizeof(BlockHeader) +
-         (vm_page_size_ - 1)) &
-        ~static_cast<uintptr_t>(vm_page_size_ - 1));
-    for (volatile char* memory = mem_begin; memory < mem_end;
-         memory += vm_page_size_) {
-      // It's required that a memory segment start as all zeros and thus the
-      // newly allocated block is all zeros at this point. Thus, writing a
-      // zero to it allows testing that the memory exists without actually
-      // changing its contents. The compiler doesn't know about the requirement
-      // and so cannot optimize-away these writes.
-      *memory = 0;
-    }
-
-    // Load information into the block header. There is no "release" of the
-    // data here because this memory can, currently, be seen only by the thread
-    // performing the allocation. When it comes time to share this, the thread
-    // will call MakeIterable() which does the release operation.
-    block->size = size;
-    block->cookie = kBlockCookieAllocated;
-    block->type_id.store(type_id, std::memory_order_relaxed);
-    return freeptr;
-  }
-}
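
Putting Allocate() and MakeIterable() together, the usual publish sequence is: allocate, fill in the payload, then make the block iterable so other threads and processes can find it. A minimal sketch follows; the type id and payload are illustrative only:

```cpp
#include <cstdint>
#include <cstring>
#include <string>

#include "base/metrics/persistent_memory_allocator.h"

constexpr uint32_t kExampleType = 0xABCD0001;  // Hypothetical type id.

base::PersistentMemoryAllocator::Reference PublishPayload(
    base::PersistentMemoryAllocator* allocator,
    const std::string& payload) {
  base::PersistentMemoryAllocator::Reference ref =
      allocator->Allocate(payload.size(), kExampleType);
  if (!ref)
    return base::PersistentMemoryAllocator::kReferenceNull;  // Full/corrupt.
  char* mem = allocator->GetAsArray<char>(ref, kExampleType, payload.size());
  if (!mem)
    return base::PersistentMemoryAllocator::kReferenceNull;
  memcpy(mem, payload.data(), payload.size());
  // The writes above are released to other threads by MakeIterable().
  allocator->MakeIterable(ref);
  return ref;
}
```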
-
-void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
-  uint32_t remaining = std::max(
-      mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
-      (uint32_t)sizeof(BlockHeader));
-  meminfo->total = mem_size_;
-  meminfo->free = remaining - sizeof(BlockHeader);
-}
-
-void PersistentMemoryAllocator::MakeIterable(Reference ref) {
-  DCHECK(!readonly_);
-  if (IsCorrupt())
-    return;
-  volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
-  if (!block)  // invalid reference
-    return;
-  if (block->next.load(std::memory_order_acquire) != 0)  // Already iterable.
-    return;
-  block->next.store(kReferenceQueue, std::memory_order_release);  // New tail.
-
-  // Try to add this block to the tail of the queue. May take multiple tries.
-  // If so, tail will be automatically updated with a more recent value during
-  // compare-exchange operations.
-  uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire);
-  for (;;) {
-    // Acquire the current tail-pointer released by previous call to this
-    // method and validate it.
-    block = GetBlock(tail, 0, 0, true, false);
-    if (!block) {
-      SetCorrupt();
-      return;
-    }
-
-    // Try to insert the block at the tail of the queue. The tail node always
-    // has an existing value of kReferenceQueue; if that is somehow not the
-    // existing value then another thread has acted in the meantime. A "strong"
-    // exchange is necessary so the "else" block does not get executed when
-    // that is not actually the case (which can happen with a "weak" exchange).
-    uint32_t next = kReferenceQueue;  // Will get replaced with existing value.
-    if (block->next.compare_exchange_strong(next, ref,
-                                            std::memory_order_acq_rel,
-                                            std::memory_order_acquire)) {
-      // Update the tail pointer to the new offset. If the "else" clause did
-      // not exist, then this could be a simple Release_Store to set the new
-      // value but because it does, it's possible that other threads could add
-      // one or more nodes at the tail before reaching this point. We don't
-      // have to check the return value because it either operates correctly
-      // or the exact same operation has already been done (by the "else"
-      // clause) on some other thread.
-      shared_meta()->tailptr.compare_exchange_strong(tail, ref,
-                                                     std::memory_order_release,
-                                                     std::memory_order_relaxed);
-      return;
-    } else {
-      // In the unlikely case that a thread crashed or was killed between the
-      // update of "next" and the update of "tailptr", it is necessary to
-      // perform the operation that would have been done. There's no explicit
-      // check for crash/kill which means that this operation may also happen
-      // even when the other thread is in perfect working order which is what
-      // necessitates the CompareAndSwap above.
-      shared_meta()->tailptr.compare_exchange_strong(tail, next,
-                                                     std::memory_order_acq_rel,
-                                                     std::memory_order_acquire);
-    }
-  }
-}
-
-// The "corrupted" state is held both locally and globally (shared). The
-// shared flag can't be trusted since a malicious actor could overwrite it.
-// Because corruption can be detected during read-only operations such as
-// iteration, this method may be called by other "const" methods. In this
-// case, it's safe to discard the constness and modify the local flag and
-// maybe even the shared flag if the underlying data isn't actually read-only.
-void PersistentMemoryAllocator::SetCorrupt() const {
-  if (!corrupt_.load(std::memory_order_relaxed) &&
-      !CheckFlag(
-          const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
-          kFlagCorrupt)) {
-    LOG(ERROR) << "Corruption detected in shared-memory segment.";
-    RecordError(kMemoryIsCorrupt);
-  }
-
-  corrupt_.store(true, std::memory_order_relaxed);
-  if (!readonly_) {
-    SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
-            kFlagCorrupt);
-  }
-}
-
-bool PersistentMemoryAllocator::IsCorrupt() const {
-  if (corrupt_.load(std::memory_order_relaxed) ||
-      CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
-    SetCorrupt();  // Make sure all indicators are set.
-    return true;
-  }
-  return false;
-}
-
-bool PersistentMemoryAllocator::IsFull() const {
-  return CheckFlag(&shared_meta()->flags, kFlagFull);
-}
-
-// Dereference a block |ref| and ensure that it's valid for the desired
-// |type_id| and |size|. |special| indicates that we may try to access block
-// headers not available to callers but still accessed by this module. By
-// having internal dereferences go through this same function, the allocator
-// is hardened against corruption.
-const volatile PersistentMemoryAllocator::BlockHeader*
-PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
-                                    uint32_t size, bool queue_ok,
-                                    bool free_ok) const {
-  // Handle special cases.
-  if (ref == kReferenceQueue && queue_ok)
-    return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
-
-  // Validation of parameters.
-  if (ref < sizeof(SharedMetadata))
-    return nullptr;
-  if (ref % kAllocAlignment != 0)
-    return nullptr;
-  size += sizeof(BlockHeader);
-  if (ref + size > mem_size_)
-    return nullptr;
-
-  // Validation of referenced block-header.
-  if (!free_ok) {
-    const volatile BlockHeader* const block =
-        reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
-    if (block->cookie != kBlockCookieAllocated)
-      return nullptr;
-    if (block->size < size)
-      return nullptr;
-    if (ref + block->size > mem_size_)
-      return nullptr;
-    if (type_id != 0 &&
-        block->type_id.load(std::memory_order_relaxed) != type_id) {
-      return nullptr;
-    }
-  }
-
-  // Return pointer to block data.
-  return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
-}
-
-void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
-  // Generally there is nothing to do as every write is done through volatile
-  // memory with atomic instructions to guarantee consistency. This (virtual)
-  // method exists so that derived classes can do special things, such as
-  // tell the OS to write changes to disk now rather than when convenient.
-}
-
-void PersistentMemoryAllocator::RecordError(int error) const {
-  if (errors_histogram_)
-    errors_histogram_->Add(error);
-}
-
-const volatile void* PersistentMemoryAllocator::GetBlockData(
-    Reference ref,
-    uint32_t type_id,
-    uint32_t size) const {
-  DCHECK_GT(size, 0U);
-  const volatile BlockHeader* block =
-      GetBlock(ref, type_id, size, false, false);
-  if (!block)
-    return nullptr;
-  return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
-}
-
-void PersistentMemoryAllocator::UpdateTrackingHistograms() {
-  DCHECK(!readonly_);
-  if (used_histogram_) {
-    MemoryInfo meminfo;
-    GetMemoryInfo(&meminfo);
-    HistogramBase::Sample used_percent = static_cast<HistogramBase::Sample>(
-        ((meminfo.total - meminfo.free) * 100ULL / meminfo.total));
-    used_histogram_->Add(used_percent);
-  }
-}
-
-
-//----- LocalPersistentMemoryAllocator -----------------------------------------
-
-LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
-    size_t size,
-    uint64_t id,
-    base::StringPiece name)
-    : PersistentMemoryAllocator(AllocateLocalMemory(size),
-                                size, 0, id, name, false) {}
-
-LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
-  DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_);
-}
-
-// static
-PersistentMemoryAllocator::Memory
-LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
-  void* address;
-
-#if defined(OS_WIN)
-  address =
-      ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
-  if (address)
-    return Memory(address, MEM_VIRTUAL);
-  UmaHistogramSparse("UMA.LocalPersistentMemoryAllocator.Failures.Win",
-                     ::GetLastError());
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
-  // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
-  // MAP_SHARED is not available on Linux <2.4 but required on Mac.
-  address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
-                   MAP_ANON | MAP_SHARED, -1, 0);
-  if (address != MAP_FAILED)
-    return Memory(address, MEM_VIRTUAL);
-  UmaHistogramSparse("UMA.LocalPersistentMemoryAllocator.Failures.Posix",
-                     errno);
-#else
-#error This architecture is not (yet) supported.
-#endif
-
-  // As a last resort, just allocate the memory from the heap. This will
-  // achieve the same basic result but the acquired memory has to be
-  // explicitly zeroed and thus realized immediately (i.e. all pages are
-  // added to the process now instead of only when first accessed).
-  address = malloc(size);
-  DPCHECK(address);
-  memset(address, 0, size);
-  return Memory(address, MEM_MALLOC);
-}
-
-// static
-void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
-                                                           size_t size,
-                                                           MemoryType type) {
-  if (type == MEM_MALLOC) {
-    free(memory);
-    return;
-  }
-
-  DCHECK_EQ(MEM_VIRTUAL, type);
-#if defined(OS_WIN)
-  BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
-  DCHECK(success);
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
-  int result = ::munmap(memory, size);
-  DCHECK_EQ(0, result);
-#else
-#error This architecture is not (yet) supported.
-#endif
-}
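
For code that owns its own segment outright, the local subclass above is the simplest entry point. A sketch; the size, id, name, and type id are arbitrary example values:

```cpp
#include "base/metrics/persistent_memory_allocator.h"

void LocalAllocatorExample() {
  base::LocalPersistentMemoryAllocator allocator(
      64 << 10 /* 64 KiB segment */, 0x1234 /* arbitrary id */, "example");
  base::PersistentMemoryAllocator::Reference ref =
      allocator.Allocate(128, /*type_id=*/0x1);  // Hypothetical type id.
  if (ref)
    allocator.MakeIterable(ref);
}  // Memory is released by ~LocalPersistentMemoryAllocator().
```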
-
-
-//----- SharedPersistentMemoryAllocator ----------------------------------------
-
-SharedPersistentMemoryAllocator::SharedPersistentMemoryAllocator(
-    std::unique_ptr<SharedMemory> memory,
-    uint64_t id,
-    base::StringPiece name,
-    bool read_only)
-    : PersistentMemoryAllocator(
-          Memory(static_cast<uint8_t*>(memory->memory()), MEM_SHARED),
-          memory->mapped_size(),
-          0,
-          id,
-          name,
-          read_only),
-      shared_memory_(std::move(memory)) {}
-
-SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() = default;
-
-// static
-bool SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
-    const SharedMemory& memory) {
-  return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, false);
-}
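
And for the shared-memory variant, a sketch of validating and attaching to a region handed over from elsewhere (the id and name are example values):

```cpp
#include <memory>
#include <utility>

#include "base/memory/shared_memory.h"
#include "base/metrics/persistent_memory_allocator.h"

std::unique_ptr<base::SharedPersistentMemoryAllocator> AttachToShared(
    std::unique_ptr<base::SharedMemory> shm) {
  // Reject regions that are unaligned, too small, or oddly sized.
  if (!base::SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm))
    return nullptr;
  return std::make_unique<base::SharedPersistentMemoryAllocator>(
      std::move(shm), /*id=*/1, "shared-example", /*read_only=*/false);
}
```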
-
-
-#if !defined(OS_NACL)
-//----- FilePersistentMemoryAllocator ------------------------------------------
-
-FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
-    std::unique_ptr<MemoryMappedFile> file,
-    size_t max_size,
-    uint64_t id,
-    base::StringPiece name,
-    bool read_only)
-    : PersistentMemoryAllocator(
-          Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
-          max_size != 0 ? max_size : file->length(),
-          0,
-          id,
-          name,
-          read_only),
-      mapped_file_(std::move(file)) {}
-
-FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() = default;
-
-// static
-bool FilePersistentMemoryAllocator::IsFileAcceptable(
-    const MemoryMappedFile& file,
-    bool read_only) {
-  return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
-}
-
-void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
-  if (sync)
-    AssertBlockingAllowed();
-  if (IsReadonly())
-    return;
-
-#if defined(OS_WIN)
-  // Windows doesn't support asynchronous flush.
-  AssertBlockingAllowed();
-  BOOL success = ::FlushViewOfFile(data(), length);
-  DPCHECK(success);
-#elif defined(OS_MACOSX)
-  // On OSX, "invalidate" removes all cached pages, forcing a re-read from
-  // disk. That's not applicable to "flush" so omit it.
-  int result =
-      ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
-  DCHECK_NE(EINVAL, result);
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
-  // On POSIX, "invalidate" forces _other_ processes to recognize what has
-  // been written to disk and so is applicable to "flush".
-  int result = ::msync(const_cast<void*>(data()), length,
-                       MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
-  DCHECK_NE(EINVAL, result);
-#else
-#error Unsupported OS.
-#endif
-}
-#endif  // !defined(OS_NACL)
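
The file-backed variant is typically wired up by memory-mapping a pre-sized file and vetting it with IsFileAcceptable() first; a sketch under those assumptions (the path handling and name are hypothetical):

```cpp
#include <memory>
#include <utility>

#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"
#include "base/metrics/persistent_memory_allocator.h"

std::unique_ptr<base::PersistentMemoryAllocator> MapMetricsFile(
    const base::FilePath& path) {
  auto mmfile = std::make_unique<base::MemoryMappedFile>();
  if (!mmfile->Initialize(path, base::MemoryMappedFile::READ_WRITE))
    return nullptr;
  if (!base::FilePersistentMemoryAllocator::IsFileAcceptable(
          *mmfile, /*read_only=*/false)) {
    return nullptr;
  }
  // A max_size of 0 means "use the file's mapped length".
  return std::make_unique<base::FilePersistentMemoryAllocator>(
      std::move(mmfile), /*max_size=*/0, /*id=*/0, "metrics",
      /*read_only=*/false);
}
```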
-
-//----- DelayedPersistentAllocation --------------------------------------------
-
-// Forwarding constructors.
-DelayedPersistentAllocation::DelayedPersistentAllocation(
-    PersistentMemoryAllocator* allocator,
-    subtle::Atomic32* ref,
-    uint32_t type,
-    size_t size,
-    bool make_iterable)
-    : DelayedPersistentAllocation(
-          allocator,
-          reinterpret_cast<std::atomic<Reference>*>(ref),
-          type,
-          size,
-          0,
-          make_iterable) {}
-
-DelayedPersistentAllocation::DelayedPersistentAllocation(
-    PersistentMemoryAllocator* allocator,
-    subtle::Atomic32* ref,
-    uint32_t type,
-    size_t size,
-    size_t offset,
-    bool make_iterable)
-    : DelayedPersistentAllocation(
-          allocator,
-          reinterpret_cast<std::atomic<Reference>*>(ref),
-          type,
-          size,
-          offset,
-          make_iterable) {}
-
-DelayedPersistentAllocation::DelayedPersistentAllocation(
-    PersistentMemoryAllocator* allocator,
-    std::atomic<Reference>* ref,
-    uint32_t type,
-    size_t size,
-    bool make_iterable)
-    : DelayedPersistentAllocation(allocator,
-                                  ref,
-                                  type,
-                                  size,
-                                  0,
-                                  make_iterable) {}
-
-// Real constructor.
-DelayedPersistentAllocation::DelayedPersistentAllocation(
-    PersistentMemoryAllocator* allocator,
-    std::atomic<Reference>* ref,
-    uint32_t type,
-    size_t size,
-    size_t offset,
-    bool make_iterable)
-    : allocator_(allocator),
-      type_(type),
-      size_(checked_cast<uint32_t>(size)),
-      offset_(checked_cast<uint32_t>(offset)),
-      make_iterable_(make_iterable),
-      reference_(ref) {
-  DCHECK(allocator_);
-  DCHECK_NE(0U, type_);
-  DCHECK_LT(0U, size_);
-  DCHECK(reference_);
-}
-
-DelayedPersistentAllocation::~DelayedPersistentAllocation() = default;
-
-void* DelayedPersistentAllocation::Get() const {
-  // The acquire-load pairs with the release in the compare-exchange below so
-  // that an allocation published by another thread is fully visible here.
-  Reference ref = reference_->load(std::memory_order_acquire);
-  if (!ref) {
-    ref = allocator_->Allocate(size_, type_);
-    if (!ref)
-      return nullptr;
-
-    // Store the new reference in its proper location using compare-and-swap.
-    // Use a "strong" exchange to ensure no false-negatives since the operation
-    // cannot be retried.
-    Reference existing = 0;  // Must be mutable; receives actual value.
-    if (reference_->compare_exchange_strong(existing, ref,
-                                            std::memory_order_release,
-                                            std::memory_order_relaxed)) {
-      if (make_iterable_)
-        allocator_->MakeIterable(ref);
-    } else {
-      // Failure indicates that something else has raced ahead, performed the
-      // allocation, and stored its reference. Purge the allocation that was
-      // just done and use the other one instead.
-      DCHECK_EQ(type_, allocator_->GetType(existing));
-      DCHECK_LE(size_, allocator_->GetAllocSize(existing));
-      allocator_->ChangeType(ref, 0, type_, /*clear=*/false);
-      ref = existing;
-    }
-  }
-
-  char* mem = allocator_->GetAsArray<char>(ref, type_, size_);
-  if (!mem) {
-    // This should never happen but be tolerant if it does as corruption from
-    // the outside is something to guard against.
-    NOTREACHED();
-    return nullptr;
-  }
-  return mem + offset_;
-}
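
The typical client of DelayedPersistentAllocation keeps the reference in static storage so the allocation happens at most once per process. A minimal sketch; the type id and accessor function are hypothetical:

```cpp
#include <atomic>
#include <cstdint>

#include "base/metrics/persistent_memory_allocator.h"

constexpr uint32_t kCounterType = 0xC0DE0001;  // Hypothetical type id.

int64_t* GetPersistentCounter(base::PersistentMemoryAllocator* allocator) {
  static std::atomic<base::PersistentMemoryAllocator::Reference> ref(0);
  static base::DelayedPersistentAllocation delayed(
      allocator, &ref, kCounterType, sizeof(int64_t), /*make_iterable=*/true);
  // Get() performs the allocation on first use; later calls return the same
  // persistent memory (or nullptr if the segment is full or corrupt).
  return static_cast<int64_t*>(delayed.Get());
}
```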
-
-}  // namespace base
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
deleted file mode 100644
index 978a362..0000000
--- a/base/metrics/persistent_memory_allocator.h
+++ /dev/null
@@ -1,872 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
-#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
-
-#include <stdint.h>
-
-#include <atomic>
-#include <memory>
-#include <type_traits>
-
-#include "base/atomicops.h"
-#include "base/base_export.h"
-#include "base/files/file_path.h"
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/strings/string_piece.h"
-
-namespace base {
-
-class HistogramBase;
-class MemoryMappedFile;
-class SharedMemory;
-
-// Simple allocator for pieces of a memory block that may be persistent
-// to some storage or shared across multiple processes. This class resides
-// under base/metrics because it was written for that purpose. It is,
-// however, fully general-purpose and can be freely moved to base/memory
-// if other uses are found.
-//
-// This class provides for thread-secure (i.e. safe against other threads
-// or processes that may be compromised and thus have malicious intent)
-// allocation of memory within a designated block and also a mechanism by
-// which other threads can learn of these allocations.
-//
-// There is (currently) no way to release an allocated block of data because
-// doing so would risk invalidating pointers held by other processes and
-// greatly complicate the allocation algorithm.
-//
-// Construction of this object can accept new, clean (i.e. zeroed) memory
-// or previously initialized memory. In the first case, construction must
-// be allowed to complete before letting other allocators attach to the same
-// segment. In other words, don't share the segment until at least one
-// allocator has been attached to it.
-//
-// Note that memory not in active use is not accessed so it is possible to
-// use virtual memory, including memory-mapped files, as backing storage with
-// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
-//
-// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
-// character arrays and manipulating that memory manually, the better way is
-// generally to use the "object" methods to create and manage allocations. In
-// this way the sizing, type-checking, and construction are all automatic. For
-// this to work, however, every type of stored object must define two public
-// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
-//
-// struct MyPersistentObjectType {
-//     // SHA1(MyPersistentObjectType): Increment this if structure changes!
-//     static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
-//
-//     // Expected size for 32/64-bit check. Update this if structure changes!
-//     static constexpr size_t kExpectedInstanceSize = 20;
-//
-//     ...
-// };
-//
-// kPersistentTypeId: This value is an arbitrary identifier that allows the
-//   identification of these objects in the allocator, including the ability
-//   to find them via iteration. The number is arbitrary but using the first
-//   four bytes of the SHA1 hash of the type name means that there shouldn't
-//   be any conflicts with other types that may also be stored in the memory.
-//   The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
-//   be used to generate the hash if the type name seems common. Use a command
-//   like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
-//   If the structure layout changes, ALWAYS increment this number so that
-//   newer versions of the code don't try to interpret persistent data written
-//   by older versions with a different layout.
-//
-// kExpectedInstanceSize: This value is the hard-coded number that matches
-//   what sizeof(T) would return. By providing it explicitly, the allocator can
-//   verify that the structure is compatible between both 32-bit and 64-bit
-//   versions of the code.
-//
-// Using New manages the memory and then calls the default constructor for the
-// object. Given that objects are persistent, no destructor is ever called
-// automatically though a caller can explicitly call Delete to destruct it and
-// change the type to something indicating it is no longer in use.
-//
-// Though persistent memory segments are transferable between programs built
-// for different natural word widths, they CANNOT be exchanged between CPUs
-// of different endianness. Attempts to do so will simply see the existing
-// data as corrupt and refuse to access any of it.
-class BASE_EXPORT PersistentMemoryAllocator {
- public:
-  typedef uint32_t Reference;
-
-  // These states are used to indicate the overall condition of the memory
-  // segment irrespective of what is stored within it. Because the data is
-  // often persistent and thus needs to be readable by different versions of
-  // a program, these values are fixed and can never change.
-  enum MemoryState : uint8_t {
-    // Persistent memory starts all zeros and so shows "uninitialized".
-    MEMORY_UNINITIALIZED = 0,
-
-    // The header has been written and the memory is ready for use.
-    MEMORY_INITIALIZED = 1,
-
-    // The data should be considered deleted. This would be set when the
-    // allocator is being cleaned up. If file-backed, the file is likely
-    // to be deleted but since deletion can fail for a variety of reasons,
-    // having this extra status means a future reader can realize what
-    // should have happened.
-    MEMORY_DELETED = 2,
-
-    // Outside code can create states starting with this number; these too
-    // must also never change between code versions.
-    MEMORY_USER_DEFINED = 100,
-  };
-
-  // Iterator for going through all iterable memory records in an allocator.
-  // Like the allocator itself, iterators are lock-free and thread-secure.
-  // That means that multiple threads can share an iterator and the same
-  // reference will not be returned twice.
-  //
-  // The order of the items returned by an iterator matches the order in which
-  // MakeIterable() was called on them. Once an allocation is made iterable,
-  // it is always such so the only possible difference between successive
-  // iterations is for more to be added to the end.
-  //
-  // Iteration, in general, is tolerant of corrupted memory. It will return
-  // what it can and stop only when corruption forces it to. Bad corruption
-  // could cause the same object to be returned many times but it will
-  // eventually quit.
-  class BASE_EXPORT Iterator {
-   public:
-    // Constructs an iterator on a given |allocator|, starting at the beginning.
-    // The allocator must live beyond the lifetime of the iterator. This class
-    // has read-only access to the allocator (hence "const") but the returned
-    // references can be used on a read/write version, too.
-    explicit Iterator(const PersistentMemoryAllocator* allocator);
-
-    // As above but resuming from the |starting_after| reference. The first call
-    // to GetNext() will return the next object found after that reference. The
-    // reference must be to an "iterable" object; references to non-iterable
-    // objects (those that never had MakeIterable() called for them) will cause
-    // a run-time error.
-    Iterator(const PersistentMemoryAllocator* allocator,
-             Reference starting_after);
-
-    // Resets the iterator back to the beginning.
-    void Reset();
-
-    // Resets the iterator, resuming from the |starting_after| reference.
-    void Reset(Reference starting_after);
-
-    // Returns the previously retrieved reference, or kReferenceNull if none.
-    // If constructed or reset with a |starting_after| location, this will
-    // return that value.
-    Reference GetLast();
-
-    // Gets the next iterable, storing that type in |type_return|. The actual
-    // return value is a reference to the allocation inside the allocator or
-    // zero if there are no more. GetNext() may still be called again at a
-    // later time to retrieve any new allocations that have been added.
-    Reference GetNext(uint32_t* type_return);
-
-    // Similar to above but gets the next iterable of a specific |type_match|.
-    // This should not be mixed with calls to GetNext() because any allocations
-    // skipped here due to a type mis-match will never be returned by later
-    // calls to GetNext() meaning it's possible to completely miss entries.
-    Reference GetNextOfType(uint32_t type_match);
-
-    // As above but works using object type.
-    template <typename T>
-    Reference GetNextOfType() {
-      return GetNextOfType(T::kPersistentTypeId);
-    }
-
-    // As above but works using objects and returns null if not found.
-    template <typename T>
-    const T* GetNextOfObject() {
-      return GetAsObject<T>(GetNextOfType<T>());
-    }
-
-    // Converts references to objects. This is a convenience method so that
-    // users of the iterator don't need to also have their own pointer to the
-    // allocator over which the iterator runs in order to retrieve objects.
-    // Because the iterator is not read/write, only "const" objects can be
-    // fetched. Non-const objects can be fetched using the reference on a
-    // non-const (external) pointer to the same allocator (or use const_cast
-    // to remove the qualifier).
-    template <typename T>
-    const T* GetAsObject(Reference ref) const {
-      return allocator_->GetAsObject<T>(ref);
-    }
-
-    // Similar to GetAsObject() but converts references to arrays of things.
-    template <typename T>
-    const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
-      return allocator_->GetAsArray<T>(ref, type_id, count);
-    }
-
-    // Convert a generic pointer back into a reference. A null reference will
-    // be returned if |memory| is not inside the persistent segment or does not
-    // point to an object of the specified |type_id|.
-    Reference GetAsReference(const void* memory, uint32_t type_id) const {
-      return allocator_->GetAsReference(memory, type_id);
-    }
-
-    // As above but convert an object back into a reference.
-    template <typename T>
-    Reference GetAsReference(const T* obj) const {
-      return allocator_->GetAsReference(obj);
-    }
-
-   private:
-    // Weak-pointer to memory allocator being iterated over.
-    const PersistentMemoryAllocator* allocator_;
-
-    // The last record that was returned.
-    std::atomic<Reference> last_record_;
-
-    // The number of records found; used for detecting loops.
-    std::atomic<uint32_t> record_count_;
-
-    DISALLOW_COPY_AND_ASSIGN(Iterator);
-  };
-
-  // Returned information about the internal state of the heap.
-  struct MemoryInfo {
-    size_t total;
-    size_t free;
-  };
-
-  enum : Reference {
-    // A common "null" reference value.
-    kReferenceNull = 0,
-  };
-
-  enum : uint32_t {
-    // A value that will match any type when doing lookups.
-    kTypeIdAny = 0x00000000,
-
-    // A value indicating that the type is in transition. Work is being done
-    // on the contents to prepare it for a new type to come.
-    kTypeIdTransitioning = 0xFFFFFFFF,
-  };
-
-  enum : size_t {
-    kSizeAny = 1  // Constant indicating that any array size is acceptable.
-  };
-
-  // This is the standard file extension (suitable for being passed to the
-  // AddExtension() method of base::FilePath) for dumps of persistent memory.
-  static const base::FilePath::CharType kFileExtension[];
-
-  // The allocator operates on any arbitrary block of memory. Creation and
-  // persisting or sharing of that block with another process is the
-  // responsibility of the caller. The allocator needs to know only the
-  // block's |base| address, the total |size| of the block, and any internal
-  // |page| size (zero if not paged) across which allocations should not span.
-  // The |id| is an arbitrary value the caller can use to identify a
-  // particular memory segment. It will only be loaded during the initial
-  // creation of the segment and can be checked by the caller for consistency.
-  // The |name|, if provided, is used to distinguish histograms for this
-  // allocator. Only the primary owner of the segment should define this value;
-  // other processes can learn it from the shared state. If the underlying
-  // memory is |readonly| then no changes will be made to it. The resulting
-  // object should be stored as a "const" pointer.
-  //
-  // PersistentMemoryAllocator does NOT take ownership of the memory block.
-  // The caller must manage it and ensure it stays available throughout the
-  // lifetime of this object.
-  //
-  // Memory segments for sharing must have had an allocator attached to them
-  // before actually being shared. If the memory segment was just created, it
-  // should be zeroed before being passed here. If it was an existing segment,
-  // the values here will be compared to copies stored in the shared segment
-  // as a guard against corruption.
-  //
-  // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
-  // method below) before construction if the definition of the segment can
-  // vary in any way at run-time. Invalid memory segments will cause a crash.
-  PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
-                            uint64_t id, base::StringPiece name,
-                            bool readonly);
-  virtual ~PersistentMemoryAllocator();
-
-  // Check if memory segment is acceptable for creation of an Allocator. This
-  // doesn't do any analysis of the data and so doesn't guarantee that the
-  // contents are valid, just that the parameters won't cause the program to
-  // abort. The IsCorrupt() method will report detection of data problems
-  // found during construction and general operation.
-  static bool IsMemoryAcceptable(const void* data, size_t size,
-                                 size_t page_size, bool readonly);
-
-  // Get the internal identifier for this persistent memory segment.
-  uint64_t Id() const;
-
-  // Get the internal name of this allocator (possibly an empty string).
-  const char* Name() const;
-
-  // Is this segment open only for read?
-  bool IsReadonly() const { return readonly_; }
-
-  // Manage the saved state of the memory.
-  void SetMemoryState(uint8_t memory_state);
-  uint8_t GetMemoryState() const;
-
-  // Create internal histograms for tracking memory use and allocation sizes
-  // for allocator of |name| (which can simply be the result of Name()). This
-  // is done separately from construction for situations such as when the
-  // histograms will be backed by memory provided by this very allocator.
-  //
-  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
-  // with the following histograms:
-  //    UMA.PersistentAllocator.name.Errors
-  //    UMA.PersistentAllocator.name.UsedPct
-  void CreateTrackingHistograms(base::StringPiece name);
-
-  // Flushes the persistent memory to any backing store. This typically does
-  // nothing but is used by the FilePersistentMemoryAllocator to inform the
-  // OS that all the data should be sent to the disk immediately. This is
-  // useful in the rare case where something has just been stored that needs
-  // to survive a hard shutdown of the machine like from a power failure.
-  // The |sync| parameter indicates if this call should block until the flush
-  // is complete but is only advisory and may or may not have an effect
-  // depending on the capabilities of the OS. Synchronous flushes are allowed
-  // only from threads that are allowed to do I/O but, since |sync| is only
-  // advisory, all flushes should be done on IO-capable threads.
-  void Flush(bool sync);
-
-  // Direct access to underlying memory segment. If the segment is shared
-  // across threads or processes, reading data through these values does
-  // not guarantee consistency. Use with care. Do not write.
-  const void* data() const { return const_cast<const char*>(mem_base_); }
-  size_t length() const { return mem_size_; }
-  size_t size() const { return mem_size_; }
-  size_t used() const;
-
-  // Get an object referenced by a |ref|. For safety reasons, the |type_id|
-  // code and size-of(|T|) are compared to ensure the reference is valid
-  // and cannot return an object outside of the memory segment. A |type_id| of
-  // kTypeIdAny (zero) will match any type, though the size is still checked.
-  // NULL is returned if any problem is detected, such as corrupted storage or
-  // incorrect parameters. Callers MUST check that the returned value is
-  // not-null EVERY TIME before accessing it or risk crashing! Once
-  // dereferenced, the pointer
-  // is safe to reuse forever.
-  //
-  // It is essential that the object be of a fixed size. All fields must be of
-  // a defined type that does not change based on the compiler or the CPU
-  // natural word size. Acceptable are char, float, double, and (u)intXX_t.
-  // Unacceptable are int, bool, and wchar_t which are implementation defined
-  // with regards to their size.
-  //
-  // Alignment must also be consistent. A uint64_t after a uint32_t will pad
-  // differently between 32 and 64 bit architectures. Either put the bigger
-  // elements first, group smaller elements into blocks the size of larger
-  // elements, or manually insert padding fields as appropriate for the
-  // largest architecture, including at the end.
-  //
-  // To protect against mistakes, all objects must have the attribute
-  // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded
-  // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
-  // instance size is not fixed, at least one build will fail.
-  //
-  // If the size of a structure changes, the type-ID used to recognize it
-  // should also change so later versions of the code don't try to read
-  // incompatible structures from earlier versions.
-  //
-  // NOTE: Though this method will guarantee that an object of the specified
-  // type can be accessed without going outside the bounds of the memory
-  // segment, it makes no guarantees of the validity of the data within the
-  // object itself. If it is expected that the contents of the segment could
-  // be compromised with malicious intent, the object must be hardened as well.
-  //
-  // Though the persistent data may be "volatile" if it is shared with
-  // other processes, such is not necessarily the case. The internal
-  // "volatile" designation is discarded so as to not propagate the viral
-  // nature of that keyword to the caller. It can add it back, if necessary,
-  // based on knowledge of how the allocator is being used.
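-  //
-  // An illustrative example of a compliant structure (the name and values
-  // here are assumptions, not part of this API):
-  //
-  //   struct MyObject {
-  //     static constexpr uint32_t kPersistentTypeId = 0x3E15A6B1;  // chosen
-  //     static constexpr size_t kExpectedInstanceSize = 16;  // hard-coded
-  //     uint64_t big;      // largest fields first for stable alignment
-  //     uint32_t small;
-  //     uint32_t padding;  // explicit tail padding to the full 16 bytes
-  //   };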
-  template <typename T>
-  T* GetAsObject(Reference ref) {
-    static_assert(std::is_standard_layout<T>::value, "only standard objects");
-    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
-    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
-    return const_cast<T*>(reinterpret_cast<volatile T*>(
-        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
-  }
-  template <typename T>
-  const T* GetAsObject(Reference ref) const {
-    static_assert(std::is_standard_layout<T>::value, "only standard objects");
-    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
-    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
-    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
-        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
-  }
-
-  // Like GetAsObject but get an array of simple, fixed-size types.
-  //
-  // Use a |count| of the required number of array elements, or kSizeAny.
-  // GetAllocSize() can be used to calculate the upper bound but isn't reliable
-  // because padding can make space for extra elements that were not written.
-  //
-  // Remember that an array of char is a string but may not be NUL terminated.
-  //
-  // There are no compile-time or run-time checks to ensure 32/64-bit size
-  // compatibility when using these accessors. Only use fixed-size types such
-  // as char, float, double, or (u)intXX_t.
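-  //
-  // For example (illustrative; |kMyStringType| is an assumed type-ID):
-  //
-  //   const char* str =
-  //       allocator->GetAsArray<char>(ref, kMyStringType, kSizeAny);
-  //   // |str| may not be NUL terminated; use GetAllocSize(ref) for bounds.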
-  template <typename T>
-  T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
-    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
-    return const_cast<T*>(reinterpret_cast<volatile T*>(
-        GetBlockData(ref, type_id, count * sizeof(T))));
-  }
-  template <typename T>
-  const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
-    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
-    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
-        GetBlockData(ref, type_id, count * sizeof(T))));
-  }
-
-  // Get the corresponding reference for an object held in persistent memory.
-  // If the |memory| is not valid or the type does not match, a kReferenceNull
-  // result will be returned.
-  Reference GetAsReference(const void* memory, uint32_t type_id) const;
-
-  // Get the number of bytes allocated to a block. This is useful when storing
-  // arrays in order to validate the ending boundary. The returned value will
-  // include any padding added to achieve the required alignment and so could
-  // be larger than given in the original Allocate() request.
-  size_t GetAllocSize(Reference ref) const;
-
-  // Access the internal "type" of an object. This generally isn't necessary
-  // but can be used to "clear" the type and so effectively mark it as deleted
-  // even though the memory stays valid and allocated. Changing the type is
-  // an atomic compare/exchange and so requires knowing the existing value.
-  // It will return false if the existing type is not what is expected.
-  //
-  // Changing the type doesn't mean the data is compatible with the new type.
-  // Passing true for |clear| will zero the memory after the type has been
-  // changed away from |from_type_id| but before it becomes |to_type_id| meaning
-  // that it is done in a manner that is thread-safe. Memory is guaranteed to
-  // be zeroed atomically by machine-word in a monotonically increasing order.
-  //
-  // It will likely be necessary to reconstruct the type before it can be used.
-  // Changing the type WILL NOT invalidate existing pointers to the data, either
-  // in this process or others, so changing the data structure could have
-  // unpredictable results. USE WITH CARE!
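-  //
-  // For example, marking an object as no-longer-in-use (illustrative;
-  // |kTypeIdMyObjectFree| and MyObject are assumed values):
-  //
-  //   bool ok = allocator->ChangeType(ref, kTypeIdMyObjectFree,
-  //                                   MyObject::kPersistentTypeId, true);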
-  uint32_t GetType(Reference ref) const;
-  bool ChangeType(Reference ref,
-                  uint32_t to_type_id,
-                  uint32_t from_type_id,
-                  bool clear);
-
-  // Allocated objects can be added to an internal list that can then be
-  // iterated over by other processes. If an allocated object can be found
-  // another way, such as by having its reference within a different object
-  // that will be made iterable, then this call is not necessary. This always
-  // succeeds unless corruption is detected; check IsCorrupted() to find out.
-  // Once an object is made iterable, its position in iteration can never
-  // change; new iterable objects will always be added after it in the series.
-  // Changing the type does not alter its "iterable" status.
-  void MakeIterable(Reference ref);
-
-  // Get the information about the amount of free space in the allocator. The
-  // amount of free space should be treated as approximate due to extras from
-  // alignment and metadata. Concurrent allocations from other threads will
-  // also make the true amount less than what is reported.
-  void GetMemoryInfo(MemoryInfo* meminfo) const;
-
-  // If there is some indication that the memory has become corrupted,
-  // calling this will attempt to prevent further damage by indicating to
-  // all processes that something is not as expected.
-  void SetCorrupt() const;
-
-  // This can be called to determine if corruption has been detected in the
-  // segment, possibly by a malicious actor. Once detected, future allocations
-  // will fail and iteration may not locate all objects.
-  bool IsCorrupt() const;
-
-  // Flag set if an allocation has failed because the memory segment was full.
-  bool IsFull() const;
-
-  // Update those "tracking" histograms which do not get updates during regular
-  // operation, such as how much memory is currently used. This should be
-  // called before such information is to be displayed or uploaded.
-  void UpdateTrackingHistograms();
-
-  // While the above works much like malloc & free, these next methods provide
-  // an "object" interface similar to new and delete.
-
-  // Reserve space in the memory segment of the desired |size| and |type_id|.
-  // A return value of zero indicates the allocation failed, otherwise the
-  // returned reference can be used by any process to get a real pointer via
-  // the GetAsObject() or GetAsArray calls. The actual allocated size may be
-  // larger and will always be a multiple of 8 bytes (64 bits).
-  Reference Allocate(size_t size, uint32_t type_id);
-
-  // Allocate and construct an object in persistent memory. The type must have
-  // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
-  // static constexpr fields that are used to ensure compatibility between
-  // software versions. An optional size parameter can be specified to force
-  // the allocation to be bigger than the size of the object; this is useful
-  // when the last field is actually variable length.
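-  //
-  // For example, allocating extra room for a trailing array (illustrative):
-  //
-  //   MyObject* obj = allocator->New<MyObject>(sizeof(MyObject) + extra);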
-  template <typename T>
-  T* New(size_t size) {
-    if (size < sizeof(T))
-      size = sizeof(T);
-    Reference ref = Allocate(size, T::kPersistentTypeId);
-    void* mem =
-        const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
-    if (!mem)
-      return nullptr;
-    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
-    return new (mem) T();
-  }
-  template <typename T>
-  T* New() {
-    return New<T>(sizeof(T));
-  }
-
-  // Similar to New, above, but construct the object out of an existing memory
-  // block and of an expected type. If |clear| is true, memory will be zeroed
-  // before construction. Though this is not standard object behavior, it
-  // is present to match with new allocations that always come from zeroed
-  // memory. Anything previously present simply ceases to exist; no destructor
-  // is called for it so explicitly Delete() the old object first if need be.
-  // Calling this will not invalidate existing pointers to the object, either
-  // in this process or others, so changing the object could have unpredictable
-  // results. USE WITH CARE!
-  template <typename T>
-  T* New(Reference ref, uint32_t from_type_id, bool clear) {
-    DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
-    // Make sure the memory is appropriate. This won't be used until after
-    // the type is changed but checking first avoids the possibility of having
-    // to change the type back.
-    void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
-    if (!mem)
-      return nullptr;
-    // Ensure the allocator's internal alignment is sufficient for this object.
-    // This protects against coding errors in the allocator.
-    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
-    // Change the type, clearing the memory if so desired. The new type is
-    // "transitioning" so that there is no race condition with the construction
-    // of the object should another thread be simultaneously iterating over
-    // data. This will "acquire" the memory so no changes get reordered before
-    // it.
-    if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear))
-      return nullptr;
-    // Construct an object of the desired type on this memory, just as if
-    // New() had been called to create it.
-    T* obj = new (mem) T();
-    // Finally change the type to the desired one. This will "release" all of
-    // the changes above and so provide a consistent view to other threads.
-    bool success =
-        ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
-    DCHECK(success);
-    return obj;
-  }
-
-  // Deletes an object by destructing it and then changing the type to a
-  // different value (default 0).
-  template <typename T>
-  void Delete(T* obj, uint32_t new_type) {
-    // Get the reference for the object.
-    Reference ref = GetAsReference<T>(obj);
-    // First change the type to "transitioning" so there is no race condition
-    // where another thread could find the object through iteration while it
-    // is being destructed. This will "acquire" the memory so no changes get
-    // reordered before it. It will fail if |ref| is invalid.
-    if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
-      return;
-    // Destruct the object.
-    obj->~T();
-    // Finally change the type to the desired value. This will "release" all
-    // the changes above.
-    bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
-    DCHECK(success);
-  }
-  template <typename T>
-  void Delete(T* obj) {
-    Delete<T>(obj, 0);
-  }
-
-  // As above but works with objects allocated from persistent memory.
-  template <typename T>
-  Reference GetAsReference(const T* obj) const {
-    return GetAsReference(obj, T::kPersistentTypeId);
-  }
-
-  // As above but works with an object allocated from persistent memory.
-  template <typename T>
-  void MakeIterable(const T* obj) {
-    MakeIterable(GetAsReference<T>(obj));
-  }
-
- protected:
-  enum MemoryType {
-    MEM_EXTERNAL,
-    MEM_MALLOC,
-    MEM_VIRTUAL,
-    MEM_SHARED,
-    MEM_FILE,
-  };
-
-  struct Memory {
-    Memory(void* b, MemoryType t) : base(b), type(t) {}
-
-    void* base;
-    MemoryType type;
-  };
-
-  // Constructs the allocator. Everything is the same as the public allocator
-  // except |memory| which is a structure with additional information besides
-  // the base address.
-  PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
-                            uint64_t id, base::StringPiece name,
-                            bool readonly);
-
-  // Implementation of Flush that accepts how much to flush.
-  virtual void FlushPartial(size_t length, bool sync);
-
-  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
-  const MemoryType mem_type_;      // Type of memory allocation.
-  const uint32_t mem_size_;        // Size of entire memory segment.
-  const uint32_t mem_page_;        // Page size allocations shouldn't cross.
-
- private:
-  struct SharedMetadata;
-  struct BlockHeader;
-  static const uint32_t kAllocAlignment;
-  static const Reference kReferenceQueue;
-
-  // The shared metadata is always located at the top of the memory segment.
-  // These convenience functions eliminate constant casting of the base
-  // pointer within the code.
-  const SharedMetadata* shared_meta() const {
-    return reinterpret_cast<const SharedMetadata*>(
-        const_cast<const char*>(mem_base_));
-  }
-  SharedMetadata* shared_meta() {
-    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
-  }
-
-  // Actual method for doing the allocation.
-  Reference AllocateImpl(size_t size, uint32_t type_id);
-
-  // Get the block header associated with a specific reference.
-  const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
-                                       uint32_t size, bool queue_ok,
-                                       bool free_ok) const;
-  volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
-                                 bool queue_ok, bool free_ok) {
-      return const_cast<volatile BlockHeader*>(
-          const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
-              ref, type_id, size, queue_ok, free_ok));
-  }
-
-  // Get the actual data within a block associated with a specific reference.
-  const volatile void* GetBlockData(Reference ref, uint32_t type_id,
-                                    uint32_t size) const;
-  volatile void* GetBlockData(Reference ref, uint32_t type_id,
-                              uint32_t size) {
-      return const_cast<volatile void*>(
-          const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
-              ref, type_id, size));
-  }
-
-  // Record an error in the internal histogram.
-  void RecordError(int error) const;
-
-  const size_t vm_page_size_;          // The page size used by the OS.
-  const bool readonly_;                // Indicates access to read-only memory.
-  mutable std::atomic<bool> corrupt_;  // Local version of "corrupted" flag.
-
-  HistogramBase* allocs_histogram_;  // Histogram recording allocs.
-  HistogramBase* used_histogram_;    // Histogram recording used space.
-  HistogramBase* errors_histogram_;  // Histogram recording errors.
-
-  friend class PersistentMemoryAllocatorTest;
-  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
-  DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
-};
-
-
-// This allocator uses a local memory block it allocates from the general
-// heap. It is generally used when some kind of "death rattle" handler will
-// save the contents to persistent storage during process shutdown. It is
-// also useful for testing.
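-//
-// An illustrative sketch (the id, name, and type-ID are arbitrary values
-// chosen here):
-//
-//   LocalPersistentMemoryAllocator allocator(64 << 10, /*id=*/0x1234,
-//                                            "MyAllocator");
-//   PersistentMemoryAllocator::Reference ref =
-//       allocator.Allocate(32, /*type_id=*/0xDA7A0001);
-//   allocator.MakeIterable(ref);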
-class BASE_EXPORT LocalPersistentMemoryAllocator
-    : public PersistentMemoryAllocator {
- public:
-  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
-                                 base::StringPiece name);
-  ~LocalPersistentMemoryAllocator() override;
-
- private:
-  // Allocates a block of local memory of the specified |size|, ensuring that
-  // the memory will not be physically allocated until accessed and will read
-  // as zero when that happens.
-  static Memory AllocateLocalMemory(size_t size);
-
-  // Deallocates a block of local |memory| of the specified |size|.
-  static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);
-
-  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
-};
-
-
-// This allocator takes a shared-memory object and performs allocation from
-// it. The memory must be previously mapped via Map() or MapAt(). The allocator
-// takes ownership of the memory object.
-class BASE_EXPORT SharedPersistentMemoryAllocator
-    : public PersistentMemoryAllocator {
- public:
-  SharedPersistentMemoryAllocator(std::unique_ptr<SharedMemory> memory,
-                                  uint64_t id,
-                                  base::StringPiece name,
-                                  bool read_only);
-  ~SharedPersistentMemoryAllocator() override;
-
-  SharedMemory* shared_memory() { return shared_memory_.get(); }
-
-  // Ensure that the memory isn't so invalid that it would crash when passing it
-  // to the allocator. This doesn't guarantee the data is valid, just that it
-  // won't cause the program to abort. The existing IsCorrupt() call will handle
-  // the rest.
-  static bool IsSharedMemoryAcceptable(const SharedMemory& memory);
-
- private:
-  std::unique_ptr<SharedMemory> shared_memory_;
-
-  DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
-};
-
-
-#if !defined(OS_NACL)  // NACL doesn't support any kind of file access in build.
-// This allocator takes a memory-mapped file object and performs allocation
-// from it. The allocator takes ownership of the file object.
-class BASE_EXPORT FilePersistentMemoryAllocator
-    : public PersistentMemoryAllocator {
- public:
-  // A |max_size| of zero will use the length of the file as the maximum
-  // size. The |file| object must have been already created with sufficient
-  // permissions (read, read/write, or read/write/extend).
-  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
-                                size_t max_size,
-                                uint64_t id,
-                                base::StringPiece name,
-                                bool read_only);
-  ~FilePersistentMemoryAllocator() override;
-
-  // Ensure that the file isn't so invalid that it would crash when passing it
-  // to the allocator. This doesn't guarantee the file is valid, just that it
-  // won't cause the program to abort. The existing IsCorrupt() call will handle
-  // the rest.
-  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);
-
- protected:
-  // PersistentMemoryAllocator:
-  void FlushPartial(size_t length, bool sync) override;
-
- private:
-  std::unique_ptr<MemoryMappedFile> mapped_file_;
-
-  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
-};
-#endif  // !defined(OS_NACL)
-
-// An allocation that is defined but not executed until required at a later
-// time. This allows for potential users of an allocation to be decoupled
-// from the logic that defines it. In addition, there can be multiple users
-// of the same allocation or any region thereof that are guaranteed to always
-// use the same space. It's okay to copy/move these objects.
-//
-// This is a top-level class instead of an inner class of the PMA so that it
-// can be forward-declared in other header files without the need to include
-// the full contents of this file.
-class BASE_EXPORT DelayedPersistentAllocation {
- public:
-  using Reference = PersistentMemoryAllocator::Reference;
-
-  // Creates a delayed allocation using the specified |allocator|. When
-  // needed, the memory will be allocated using the specified |type| and
-  // |size|. If |offset| is given, the returned pointer will be at that
-  // offset into the segment; this allows combining allocations into a
-  // single persistent segment to reduce overhead and means an "all or
-  // nothing" request. Note that |size| is always the total memory size
-  // and |offset| is just indicating the start of a block within it.  If
-  // |make_iterable| was true, the allocation will made iterable when it
-  // is created; already existing allocations are not changed.
-  //
-  // Once allocated, a reference to the segment will be stored at |ref|.
-  // This shared location must be initialized to zero (0); it is checked
-  // with every Get() request to see if the allocation has already been
-  // done. If reading |ref| outside of this object, be sure to do an
-  // "acquire" load. Don't write to it -- leave that to this object.
-  //
-  // For convenience, methods taking both Atomic32 and std::atomic<Reference>
-  // are defined.
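-  //
-  // An illustrative sketch (the type and size values are assumptions):
-  //
-  //   static std::atomic<Reference> g_ref;  // static storage: starts at zero
-  //   DelayedPersistentAllocation delayed(allocator, &g_ref,
-  //                                       /*type=*/0xDE1A7ED1, /*size=*/64,
-  //                                       /*make_iterable=*/false);
-  //   void* mem = delayed.Get();  // allocates on first call, reused after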
-  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
-                              subtle::Atomic32* ref,
-                              uint32_t type,
-                              size_t size,
-                              bool make_iterable);
-  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
-                              subtle::Atomic32* ref,
-                              uint32_t type,
-                              size_t size,
-                              size_t offset,
-                              bool make_iterable);
-  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
-                              std::atomic<Reference>* ref,
-                              uint32_t type,
-                              size_t size,
-                              bool make_iterable);
-  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
-                              std::atomic<Reference>* ref,
-                              uint32_t type,
-                              size_t size,
-                              size_t offset,
-                              bool make_iterable);
-  ~DelayedPersistentAllocation();
-
-  // Gets a pointer to the defined allocation. This will realize the request
-  // and update the reference provided during construction. The memory will
-  // be zeroed the first time it is returned; after that it is shared with
-  // all other Get() requests and so shows any changes made to it elsewhere.
-  //
-  // If the allocation fails for any reason, null will be returned. This works
-  // even on "const" objects because the allocation is already defined, just
-  // delayed.
-  void* Get() const;
-
-  // Gets the internal reference value. If this returns a non-zero value then
-  // a subsequent call to Get() will do nothing but convert that reference into
-  // a memory location -- useful for accessing an existing allocation without
-  // creating one unnecessarily.
-  Reference reference() const {
-    return reference_->load(std::memory_order_relaxed);
-  }
-
- private:
-  // The underlying object that does the actual allocation of memory. Its
-  // lifetime must exceed that of all DelayedPersistentAllocation objects
-  // that use it.
-  PersistentMemoryAllocator* const allocator_;
-
-  // The desired type and size of the allocated segment plus the offset
-  // within it for the defined request.
-  const uint32_t type_;
-  const uint32_t size_;
-  const uint32_t offset_;
-
-  // Flag indicating if allocation should be made iterable when done.
-  const bool make_iterable_;
-
-  // The location at which a reference to the allocated segment is to be
-  // stored once the allocation is complete. If multiple delayed allocations
-  // share the same pointer then an allocation on one will amount to an
-  // allocation for all.
-  volatile std::atomic<Reference>* const reference_;
-
-  // No DISALLOW_COPY_AND_ASSIGN as it's okay to copy/move these objects.
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
diff --git a/base/metrics/persistent_sample_map.cc b/base/metrics/persistent_sample_map.cc
deleted file mode 100644
index f38b9d1..0000000
--- a/base/metrics/persistent_sample_map.cc
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/persistent_sample_map.h"
-
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/metrics/persistent_histogram_allocator.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/stl_util.h"
-
-namespace base {
-
-typedef HistogramBase::Count Count;
-typedef HistogramBase::Sample Sample;
-
-namespace {
-
-// An iterator for going through a PersistentSampleMap. The logic here is
-// identical to that of SampleMapIterator but with different data structures.
-// Changes here likely need to be duplicated there.
-class PersistentSampleMapIterator : public SampleCountIterator {
- public:
-  typedef std::map<HistogramBase::Sample, HistogramBase::Count*>
-      SampleToCountMap;
-
-  explicit PersistentSampleMapIterator(const SampleToCountMap& sample_counts);
-  ~PersistentSampleMapIterator() override;
-
-  // SampleCountIterator:
-  bool Done() const override;
-  void Next() override;
-  void Get(HistogramBase::Sample* min,
-           int64_t* max,
-           HistogramBase::Count* count) const override;
-
- private:
-  void SkipEmptyBuckets();
-
-  SampleToCountMap::const_iterator iter_;
-  const SampleToCountMap::const_iterator end_;
-};
-
-PersistentSampleMapIterator::PersistentSampleMapIterator(
-    const SampleToCountMap& sample_counts)
-    : iter_(sample_counts.begin()),
-      end_(sample_counts.end()) {
-  SkipEmptyBuckets();
-}
-
-PersistentSampleMapIterator::~PersistentSampleMapIterator() = default;
-
-bool PersistentSampleMapIterator::Done() const {
-  return iter_ == end_;
-}
-
-void PersistentSampleMapIterator::Next() {
-  DCHECK(!Done());
-  ++iter_;
-  SkipEmptyBuckets();
-}
-
-void PersistentSampleMapIterator::Get(Sample* min,
-                                      int64_t* max,
-                                      Count* count) const {
-  DCHECK(!Done());
-  if (min)
-    *min = iter_->first;
-  if (max)
-    *max = strict_cast<int64_t>(iter_->first) + 1;
-  if (count)
-    *count = *iter_->second;
-}
-
-void PersistentSampleMapIterator::SkipEmptyBuckets() {
-  while (!Done() && *iter_->second == 0) {
-    ++iter_;
-  }
-}
-
-// This structure holds an entry for a PersistentSampleMap within a persistent
-// memory allocator. The "id" must be unique across all maps held by an
-// allocator or they will get attached to the wrong sample map.
-struct SampleRecord {
-  // SHA1(SampleRecord): Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId = 0x8FE6A69F + 1;
-
-  // Expected size for 32/64-bit check.
-  static constexpr size_t kExpectedInstanceSize = 16;
-
-  uint64_t id;   // Unique identifier of owner.
-  Sample value;  // The value for which this record holds a count.
-  Count count;   // The count associated with the above value.
-};
-
-}  // namespace
-
-PersistentSampleMap::PersistentSampleMap(
-    uint64_t id,
-    PersistentHistogramAllocator* allocator,
-    Metadata* meta)
-    : HistogramSamples(id, meta), allocator_(allocator) {}
-
-PersistentSampleMap::~PersistentSampleMap() {
-  if (records_)
-    records_->Release(this);
-}
-
-void PersistentSampleMap::Accumulate(Sample value, Count count) {
-#if 0  // TODO(bcwhite) Re-enable efficient version after crbug.com/682680.
-  *GetOrCreateSampleCountStorage(value) += count;
-#else
-  Count* local_count_ptr = GetOrCreateSampleCountStorage(value);
-  if (count < 0) {
-    if (*local_count_ptr < -count)
-      RecordNegativeSample(SAMPLES_ACCUMULATE_WENT_NEGATIVE, -count);
-    else
-      RecordNegativeSample(SAMPLES_ACCUMULATE_NEGATIVE_COUNT, -count);
-    *local_count_ptr += count;
-  } else {
-    Sample old_value = *local_count_ptr;
-    Sample new_value = old_value + count;
-    *local_count_ptr = new_value;
-    if ((new_value >= 0) != (old_value >= 0))
-      RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
-  }
-#endif
-  IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
-}
-
-Count PersistentSampleMap::GetCount(Sample value) const {
-  // Have to override "const" to make sure all samples have been loaded before
-  // being able to know what value to return.
-  Count* count_pointer =
-      const_cast<PersistentSampleMap*>(this)->GetSampleCountStorage(value);
-  return count_pointer ? *count_pointer : 0;
-}
-
-Count PersistentSampleMap::TotalCount() const {
-  // Have to override "const" in order to make sure all samples have been
-  // loaded before trying to iterate over the map.
-  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
-
-  Count count = 0;
-  for (const auto& entry : sample_counts_) {
-    count += *entry.second;
-  }
-  return count;
-}
-
-std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
-  // Have to override "const" in order to make sure all samples have been
-  // loaded before trying to iterate over the map.
-  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
-  return WrapUnique(new PersistentSampleMapIterator(sample_counts_));
-}
-
-// static
-PersistentMemoryAllocator::Reference
-PersistentSampleMap::GetNextPersistentRecord(
-    PersistentMemoryAllocator::Iterator& iterator,
-    uint64_t* sample_map_id) {
-  const SampleRecord* record = iterator.GetNextOfObject<SampleRecord>();
-  if (!record)
-    return 0;
-
-  *sample_map_id = record->id;
-  return iterator.GetAsReference(record);
-}
-
-// static
-PersistentMemoryAllocator::Reference
-PersistentSampleMap::CreatePersistentRecord(
-    PersistentMemoryAllocator* allocator,
-    uint64_t sample_map_id,
-    Sample value) {
-  SampleRecord* record = allocator->New<SampleRecord>();
-  if (!record) {
-    NOTREACHED() << "full=" << allocator->IsFull()
-                 << ", corrupt=" << allocator->IsCorrupt();
-    return 0;
-  }
-
-  record->id = sample_map_id;
-  record->value = value;
-  record->count = 0;
-
-  PersistentMemoryAllocator::Reference ref = allocator->GetAsReference(record);
-  allocator->MakeIterable(ref);
-  return ref;
-}
-
-bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
-                                          Operator op) {
-  Sample min;
-  int64_t max;
-  Count count;
-  for (; !iter->Done(); iter->Next()) {
-    iter->Get(&min, &max, &count);
-    if (count == 0)
-      continue;
-    if (strict_cast<int64_t>(min) + 1 != max)
-      return false;  // SparseHistogram only supports buckets of size 1.
-    *GetOrCreateSampleCountStorage(min) +=
-        (op == HistogramSamples::ADD) ? count : -count;
-  }
-  return true;
-}
-
-Count* PersistentSampleMap::GetSampleCountStorage(Sample value) {
-  // If |value| is already in the map, just return that.
-  auto it = sample_counts_.find(value);
-  if (it != sample_counts_.end())
-    return it->second;
-
-  // Import any new samples from persistent memory looking for the value.
-  return ImportSamples(value, false);
-}
-
-Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
-  // Get any existing count storage.
-  Count* count_pointer = GetSampleCountStorage(value);
-  if (count_pointer)
-    return count_pointer;
-
-  // Create a new record in persistent memory for the value. |records_| will
-  // have been initialized by the GetSampleCountStorage() call above.
-  DCHECK(records_);
-  PersistentMemoryAllocator::Reference ref = records_->CreateNew(value);
-  if (!ref) {
-    // If a new record could not be created then the underlying allocator is
-    // full or corrupt. Instead, allocate the counter from the heap. This
-    // sample will not be persistent, will not be shared, and will leak...
-    // but it's better than crashing.
-    count_pointer = new Count(0);
-    sample_counts_[value] = count_pointer;
-    return count_pointer;
-  }
-
-  // A race condition between two independent processes (i.e. two independent
-  // histogram objects sharing the same sample data) could cause two of the
-  // above records to be created. The allocator, however, forces a strict
-  // ordering on iterable objects so use the import method to actually add the
-  // just-created record. This ensures that all PersistentSampleMap objects
-  // will always use the same record, whichever was first made iterable.
-  // Thread-safety within a process where multiple threads use the same
-  // histogram object is delegated to the controlling histogram object which,
-  // for sparse histograms, is a lock object.
-  count_pointer = ImportSamples(value, false);
-  DCHECK(count_pointer);
-  return count_pointer;
-}
-
-PersistentSampleMapRecords* PersistentSampleMap::GetRecords() {
-  // The |records_| pointer is lazily fetched from the |allocator_| only on
-  // first use. Sometimes duplicate histograms are created by race conditions
-  // and if both were to grab the records object, there would be a conflict.
-  // Use of a histogram, and thus a call to this method, won't occur until
-  // after the histogram has been de-dup'd.
-  if (!records_)
-    records_ = allocator_->UseSampleMapRecords(id(), this);
-  return records_;
-}
-
-Count* PersistentSampleMap::ImportSamples(Sample until_value,
-                                          bool import_everything) {
-  Count* found_count = nullptr;
-  PersistentMemoryAllocator::Reference ref;
-  PersistentSampleMapRecords* records = GetRecords();
-  while ((ref = records->GetNext()) != 0) {
-    SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
-    if (!record)
-      continue;
-
-    DCHECK_EQ(id(), record->id);
-
-    // Check if the record's value is already known.
-    if (!ContainsKey(sample_counts_, record->value)) {
-      // No: Add it to map of known values.
-      sample_counts_[record->value] = &record->count;
-    } else {
-      // Yes: Ignore it; it's a duplicate caused by a race condition -- see
-      // code & comment in GetOrCreateSampleCountStorage() for details.
-      // Check that nothing ever operated on the duplicate record.
-      DCHECK_EQ(0, record->count);
-    }
-
-    // Check if it's the value being searched for and, if so, keep a pointer
-    // to return later. Stop here unless everything is being imported.
-    // Because race conditions can cause multiple records for a single value,
-    // be sure to return the first one found.
-    if (record->value == until_value) {
-      if (!found_count)
-        found_count = &record->count;
-      if (!import_everything)
-        break;
-    }
-  }
-
-  return found_count;
-}
-
-}  // namespace base
diff --git a/base/metrics/persistent_sample_map.h b/base/metrics/persistent_sample_map.h
deleted file mode 100644
index 853f862..0000000
--- a/base/metrics/persistent_sample_map.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// PersistentSampleMap implements HistogramSamples interface. It is used
-// by the SparseHistogram class to store samples in persistent memory which
-// allows it to be shared between processes or live across restarts.
-
-#ifndef BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
-#define BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
-
-#include <stdint.h>
-
-#include <map>
-#include <memory>
-
-#include "base/compiler_specific.h"
-#include "base/macros.h"
-#include "base/metrics/histogram_base.h"
-#include "base/metrics/histogram_samples.h"
-#include "base/metrics/persistent_memory_allocator.h"
-
-namespace base {
-
-class PersistentHistogramAllocator;
-class PersistentSampleMapRecords;
-
-// The logic here is similar to that of SampleMap but with different data
-// structures. Changes here likely need to be duplicated there.
-class BASE_EXPORT PersistentSampleMap : public HistogramSamples {
- public:
-  // Constructs a persistent sample map using a PersistentHistogramAllocator
-  // as the data source for persistent records.
-  PersistentSampleMap(uint64_t id,
-                      PersistentHistogramAllocator* allocator,
-                      Metadata* meta);
-
-  ~PersistentSampleMap() override;
-
-  // HistogramSamples:
-  void Accumulate(HistogramBase::Sample value,
-                  HistogramBase::Count count) override;
-  HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
-  HistogramBase::Count TotalCount() const override;
-  std::unique_ptr<SampleCountIterator> Iterator() const override;
-
-  // Uses a persistent-memory |iterator| to locate and return information about
-  // the next record holding information for a PersistentSampleMap. The record
-  // could be for any Map so return the |sample_map_id| as well.
-  static PersistentMemoryAllocator::Reference GetNextPersistentRecord(
-      PersistentMemoryAllocator::Iterator& iterator,
-      uint64_t* sample_map_id);
-
-  // Creates a new record in an |allocator| storing count information for a
-  // specific sample |value| of a histogram with the given |sample_map_id|.
-  static PersistentMemoryAllocator::Reference CreatePersistentRecord(
-      PersistentMemoryAllocator* allocator,
-      uint64_t sample_map_id,
-      HistogramBase::Sample value);
-
- protected:
-  // Performs arithmetic. |op| is ADD or SUBTRACT.
-  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
-
-  // Gets a pointer to a "count" corresponding to a given |value|. Returns NULL
-  // if sample does not exist.
-  HistogramBase::Count* GetSampleCountStorage(HistogramBase::Sample value);
-
-  // Gets a pointer to a "count" corresponding to a given |value|, creating
-  // the sample (initialized to zero) if it does not already exist.
-  HistogramBase::Count* GetOrCreateSampleCountStorage(
-      HistogramBase::Sample value);
-
- private:
-  // Gets the object that manages persistent records. This returns the
-  // |records_| member after first initializing it if necessary.
-  PersistentSampleMapRecords* GetRecords();
-
-  // Imports samples from persistent memory by iterating over all sample
-  // records found therein, adding them to the sample_counts_ map. If a
-  // count for the sample |until_value| is found, stop the import and return
-  // a pointer to that counter. If that value is not found, null will be
-  // returned after all currently available samples have been loaded. Pass
-  // true for |import_everything| to force the importing of all available
-  // samples even if a match is found.
-  HistogramBase::Count* ImportSamples(HistogramBase::Sample until_value,
-                                      bool import_everything);
-
-  // All created/loaded sample values and their associated counts. The storage
-  // for the actual Count numbers is owned by the |records_| object and its
-  // underlying allocator.
-  std::map<HistogramBase::Sample, HistogramBase::Count*> sample_counts_;
-
-  // The allocator that manages histograms inside persistent memory. This is
-  // owned externally and is expected to live beyond the life of this object.
-  PersistentHistogramAllocator* allocator_;
-
-  // The object that manages sample records inside persistent memory. This is
-  // owned by the |allocator_| object (above) and so, like it, is expected to
-  // live beyond the life of this object. This value is lazily-initialized on
-  // first use via the GetRecords() accessor method.
-  PersistentSampleMapRecords* records_ = nullptr;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMap);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
diff --git a/base/metrics/record_histogram_checker.h b/base/metrics/record_histogram_checker.h
deleted file mode 100644
index 75bc336..0000000
--- a/base/metrics/record_histogram_checker.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_RECORD_HISTOGRAM_CHECKER_H_
-#define BASE_METRICS_RECORD_HISTOGRAM_CHECKER_H_
-
-#include <stdint.h>
-
-#include "base/base_export.h"
-
-namespace base {
-
-// RecordHistogramChecker provides an interface for checking whether
-// the given histogram should be recorded.
-class BASE_EXPORT RecordHistogramChecker {
- public:
-  virtual ~RecordHistogramChecker() = default;
-
-  // Returns true iff the given histogram should be recorded.
-  // This method may be called on any thread, so it should not mutate any state.
-  virtual bool ShouldRecord(uint64_t histogram_hash) const = 0;
-};
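-
-// An illustrative implementation (an assumption, not part of this header)
-// that records every histogram:
-//
-//   class RecordAllChecker : public RecordHistogramChecker {
-//    public:
-//     bool ShouldRecord(uint64_t /*histogram_hash*/) const override {
-//       return true;
-//     }
-//   };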
-
-}  // namespace base
-
-#endif  // BASE_METRICS_RECORD_HISTOGRAM_CHECKER_H_
diff --git a/base/metrics/sample_map.cc b/base/metrics/sample_map.cc
deleted file mode 100644
index c6dce29..0000000
--- a/base/metrics/sample_map.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/sample_map.h"
-
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/stl_util.h"
-
-namespace base {
-
-typedef HistogramBase::Count Count;
-typedef HistogramBase::Sample Sample;
-
-namespace {
-
-// An iterator for going through a SampleMap. The logic here is identical
-// to that of PersistentSampleMapIterator but with different data structures.
-// Changes here likely need to be duplicated there.
-class SampleMapIterator : public SampleCountIterator {
- public:
-  typedef std::map<HistogramBase::Sample, HistogramBase::Count>
-      SampleToCountMap;
-
-  explicit SampleMapIterator(const SampleToCountMap& sample_counts);
-  ~SampleMapIterator() override;
-
-  // SampleCountIterator:
-  bool Done() const override;
-  void Next() override;
-  void Get(HistogramBase::Sample* min,
-           int64_t* max,
-           HistogramBase::Count* count) const override;
-
- private:
-  void SkipEmptyBuckets();
-
-  SampleToCountMap::const_iterator iter_;
-  const SampleToCountMap::const_iterator end_;
-};
-
-SampleMapIterator::SampleMapIterator(const SampleToCountMap& sample_counts)
-    : iter_(sample_counts.begin()),
-      end_(sample_counts.end()) {
-  SkipEmptyBuckets();
-}
-
-SampleMapIterator::~SampleMapIterator() = default;
-
-bool SampleMapIterator::Done() const {
-  return iter_ == end_;
-}
-
-void SampleMapIterator::Next() {
-  DCHECK(!Done());
-  ++iter_;
-  SkipEmptyBuckets();
-}
-
-void SampleMapIterator::Get(Sample* min, int64_t* max, Count* count) const {
-  DCHECK(!Done());
-  if (min)
-    *min = iter_->first;
-  if (max)
-    *max = strict_cast<int64_t>(iter_->first) + 1;
-  if (count)
-    *count = iter_->second;
-}
-
-void SampleMapIterator::SkipEmptyBuckets() {
-  while (!Done() && iter_->second == 0) {
-    ++iter_;
-  }
-}
-
-}  // namespace
-
-SampleMap::SampleMap() : SampleMap(0) {}
-
-SampleMap::SampleMap(uint64_t id) : HistogramSamples(id, new LocalMetadata()) {}
-
-SampleMap::~SampleMap() {
-  delete static_cast<LocalMetadata*>(meta());
-}
-
-void SampleMap::Accumulate(Sample value, Count count) {
-  sample_counts_[value] += count;
-  IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
-}
-
-Count SampleMap::GetCount(Sample value) const {
-  std::map<Sample, Count>::const_iterator it = sample_counts_.find(value);
-  if (it == sample_counts_.end())
-    return 0;
-  return it->second;
-}
-
-Count SampleMap::TotalCount() const {
-  Count count = 0;
-  for (const auto& entry : sample_counts_) {
-    count += entry.second;
-  }
-  return count;
-}
-
-std::unique_ptr<SampleCountIterator> SampleMap::Iterator() const {
-  return WrapUnique(new SampleMapIterator(sample_counts_));
-}
-
-bool SampleMap::AddSubtractImpl(SampleCountIterator* iter, Operator op) {
-  Sample min;
-  int64_t max;
-  Count count;
-  for (; !iter->Done(); iter->Next()) {
-    iter->Get(&min, &max, &count);
-    if (strict_cast<int64_t>(min) + 1 != max)
-      return false;  // SparseHistogram only supports buckets of size 1.
-
-    sample_counts_[min] += (op == HistogramSamples::ADD) ? count : -count;
-  }
-  return true;
-}
-
-}  // namespace base
diff --git a/base/metrics/sample_map.h b/base/metrics/sample_map.h
deleted file mode 100644
index 7458e05..0000000
--- a/base/metrics/sample_map.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// SampleMap implements HistogramSamples interface. It is used by the
-// SparseHistogram class to store samples.
-
-#ifndef BASE_METRICS_SAMPLE_MAP_H_
-#define BASE_METRICS_SAMPLE_MAP_H_
-
-#include <stdint.h>
-
-#include <map>
-#include <memory>
-
-#include "base/compiler_specific.h"
-#include "base/macros.h"
-#include "base/metrics/histogram_base.h"
-#include "base/metrics/histogram_samples.h"
-
-namespace base {
-
-// The logic here is similar to that of PersistentSampleMap but with different
-// data structures. Changes here likely need to be duplicated there.
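-//
-// An illustrative sketch of direct use (normally SparseHistogram manages
-// these objects internally):
-//
-//   SampleMap samples(/*id=*/1);
-//   samples.Accumulate(/*value=*/42, /*count=*/3);
-//   DCHECK_EQ(3, samples.GetCount(42));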
-class BASE_EXPORT SampleMap : public HistogramSamples {
- public:
-  SampleMap();
-  explicit SampleMap(uint64_t id);
-  ~SampleMap() override;
-
-  // HistogramSamples:
-  void Accumulate(HistogramBase::Sample value,
-                  HistogramBase::Count count) override;
-  HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
-  HistogramBase::Count TotalCount() const override;
-  std::unique_ptr<SampleCountIterator> Iterator() const override;
-
- protected:
-  // Performs arithmetic. |op| is ADD or SUBTRACT.
-  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
-
- private:
-  std::map<HistogramBase::Sample, HistogramBase::Count> sample_counts_;
-
-  DISALLOW_COPY_AND_ASSIGN(SampleMap);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_SAMPLE_MAP_H_
diff --git a/base/metrics/sample_vector.cc b/base/metrics/sample_vector.cc
deleted file mode 100644
index cf8634e..0000000
--- a/base/metrics/sample_vector.cc
+++ /dev/null
@@ -1,429 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/sample_vector.h"
-
-#include "base/lazy_instance.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/metrics/persistent_memory_allocator.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/synchronization/lock.h"
-#include "base/threading/platform_thread.h"
-
-// This SampleVector makes use of the single-sample embedded in the base
-// HistogramSamples class. If the count is non-zero then there is guaranteed
-// (within the bounds of "eventual consistency") to be no allocated external
-// storage. Once the full counts storage is allocated, the single-sample must
-// be extracted and disabled.
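-//
-// For illustration (a sketch of the intended transitions, not code from this
-// file): the first Accumulate(v, 1) stores {bucket, count} in the embedded
-// single-sample; a later Accumulate() call that cannot be folded into that
-// entry mounts the counts array, moves the single-sample into
-// counts()[bucket], and disables the single-sample so that all further
-// updates go to the array.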
-
-namespace base {
-
-typedef HistogramBase::Count Count;
-typedef HistogramBase::Sample Sample;
-
-SampleVectorBase::SampleVectorBase(uint64_t id,
-                                   Metadata* meta,
-                                   const BucketRanges* bucket_ranges)
-    : HistogramSamples(id, meta), bucket_ranges_(bucket_ranges) {
-  CHECK_GE(bucket_ranges_->bucket_count(), 1u);
-}
-
-SampleVectorBase::~SampleVectorBase() = default;
-
-void SampleVectorBase::Accumulate(Sample value, Count count) {
-  const size_t bucket_index = GetBucketIndex(value);
-
-  // Handle the single-sample case.
-  if (!counts()) {
-    // Try to accumulate the parameters into the single-count entry.
-    if (AccumulateSingleSample(value, count, bucket_index)) {
-      // A race condition could lead to a new single-sample being accumulated
-      // above just after another thread executed the
-      // MountCountsStorageAndMoveSingleSample() below. Since the storage is
-      // now mounted, it could also be mounted elsewhere and have values
-      // written to it. It's not allowed to have both a single-sample and
-      // entries in the counts array, so move the single-sample.
-      if (counts())
-        MoveSingleSampleToCounts();
-      return;
-    }
-
-    // Real storage is needed to hold both what was in the single-sample and
-    // the parameter information.
-    MountCountsStorageAndMoveSingleSample();
-  }
-
-  // Handle the multi-sample case.
-  Count new_value =
-      subtle::NoBarrier_AtomicIncrement(&counts()[bucket_index], count);
-  IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
-
-  // TODO(bcwhite) Remove after crbug.com/682680.
-  Count old_value = new_value - count;
-  if ((new_value >= 0) != (old_value >= 0) && count > 0)
-    RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
-}
-
-Count SampleVectorBase::GetCount(Sample value) const {
-  return GetCountAtIndex(GetBucketIndex(value));
-}
-
-Count SampleVectorBase::TotalCount() const {
-  // Handle the single-sample case.
-  SingleSample sample = single_sample().Load();
-  if (sample.count != 0)
-    return sample.count;
-
-  // Handle the multi-sample case.
-  if (counts() || MountExistingCountsStorage()) {
-    Count count = 0;
-    size_t size = counts_size();
-    const HistogramBase::AtomicCount* counts_array = counts();
-    for (size_t i = 0; i < size; ++i) {
-      count += subtle::NoBarrier_Load(&counts_array[i]);
-    }
-    return count;
-  }
-
-  // And the no-value case.
-  return 0;
-}
-
-Count SampleVectorBase::GetCountAtIndex(size_t bucket_index) const {
-  DCHECK(bucket_index < counts_size());
-
-  // Handle the single-sample case.
-  SingleSample sample = single_sample().Load();
-  if (sample.count != 0)
-    return sample.bucket == bucket_index ? sample.count : 0;
-
-  // Handle the multi-sample case.
-  if (counts() || MountExistingCountsStorage())
-    return subtle::NoBarrier_Load(&counts()[bucket_index]);
-
-  // And the no-value case.
-  return 0;
-}
-
-std::unique_ptr<SampleCountIterator> SampleVectorBase::Iterator() const {
-  // Handle the single-sample case.
-  SingleSample sample = single_sample().Load();
-  if (sample.count != 0) {
-    return std::make_unique<SingleSampleIterator>(
-        bucket_ranges_->range(sample.bucket),
-        bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket);
-  }
-
-  // Handle the multi-sample case.
-  if (counts() || MountExistingCountsStorage()) {
-    return std::make_unique<SampleVectorIterator>(counts(), counts_size(),
-                                                  bucket_ranges_);
-  }
-
-  // And the no-value case.
-  return std::make_unique<SampleVectorIterator>(nullptr, 0, bucket_ranges_);
-}
-
-bool SampleVectorBase::AddSubtractImpl(SampleCountIterator* iter,
-                                       HistogramSamples::Operator op) {
-  // Stop now if there's nothing to do.
-  if (iter->Done())
-    return true;
-
-  // Get the first value and its index.
-  HistogramBase::Sample min;
-  int64_t max;
-  HistogramBase::Count count;
-  iter->Get(&min, &max, &count);
-  size_t dest_index = GetBucketIndex(min);
-
-  // The destination must be a superset of the source, meaning that though the
-  // incoming ranges will find an exact match, the incoming bucket-index, if
-  // it exists, may be offset from the destination bucket-index. Calculate
-  // that offset from the passed iterator; there are no overflow checks
-  // because 2's complement math will work it out in the end.
-  //
-  // Because GetBucketIndex() always returns the same true or false result for
-  // a given iterator object, |index_offset| is either set here and used below,
-  // or never set and never used. The compiler doesn't know this, though, which
-  // is why it's necessary to initialize it to something.
-  size_t index_offset = 0;
-  size_t iter_index;
-  if (iter->GetBucketIndex(&iter_index))
-    index_offset = dest_index - iter_index;
-  if (dest_index >= counts_size())
-    return false;
-
-  // Post-increment. Information about the current sample is not available
-  // after this point.
-  iter->Next();
-
-  // Single-value storage is possible if there is no counts storage and the
-  // retrieved entry is the only one in the iterator.
-  if (!counts()) {
-    if (iter->Done()) {
-      // Don't call AccumulateSingleSample because that updates sum and count
-      // which was already done by the caller of this method.
-      if (single_sample().Accumulate(
-              dest_index, op == HistogramSamples::ADD ? count : -count)) {
-        // Handle race-condition that mounted counts storage between above and
-        // here.
-        if (counts())
-          MoveSingleSampleToCounts();
-        return true;
-      }
-    }
-
-    // The counts storage will be needed to hold the multiple incoming values.
-    MountCountsStorageAndMoveSingleSample();
-  }
-
-  // Go through the iterator and add the counts into correct bucket.
-  while (true) {
-    // Ensure that the sample's min/max match the ranges min/max.
-    if (min != bucket_ranges_->range(dest_index) ||
-        max != bucket_ranges_->range(dest_index + 1)) {
-      NOTREACHED() << "sample=" << min << "," << max
-                   << "; range=" << bucket_ranges_->range(dest_index) << ","
-                   << bucket_ranges_->range(dest_index + 1);
-      return false;
-    }
-
-    // Sample's bucket matches exactly. Adjust count.
-    subtle::NoBarrier_AtomicIncrement(
-        &counts()[dest_index], op == HistogramSamples::ADD ? count : -count);
-
-    // Advance to the next iterable sample. See comments above for how
-    // everything works.
-    if (iter->Done())
-      return true;
-    iter->Get(&min, &max, &count);
-    if (iter->GetBucketIndex(&iter_index)) {
-      // Destination bucket is a known offset from the source bucket.
-      dest_index = iter_index + index_offset;
-    } else {
-      // Destination bucket has to be determined anew each time.
-      dest_index = GetBucketIndex(min);
-    }
-    if (dest_index >= counts_size())
-      return false;
-    iter->Next();
-  }
-}
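-
-// Offset example (hypothetical indices, for illustration): if the source
-// iterator reports bucket-index 2 for a sample that lands in destination
-// bucket 5, |index_offset| is 3; every later sample can then be placed at
-// iter_index + 3 without re-running the binary search in GetBucketIndex().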
-
-// Use simple binary search. This is fully general, but better approaches are
-// possible if the buckets are known to be linearly distributed.
-size_t SampleVectorBase::GetBucketIndex(Sample value) const {
-  size_t bucket_count = bucket_ranges_->bucket_count();
-  CHECK_GE(bucket_count, 1u);
-  CHECK_GE(value, bucket_ranges_->range(0));
-  CHECK_LT(value, bucket_ranges_->range(bucket_count));
-
-  size_t under = 0;
-  size_t over = bucket_count;
-  size_t mid;
-  do {
-    DCHECK_GE(over, under);
-    mid = under + (over - under)/2;
-    if (mid == under)
-      break;
-    if (bucket_ranges_->range(mid) <= value)
-      under = mid;
-    else
-      over = mid;
-  } while (true);
-
-  DCHECK_LE(bucket_ranges_->range(mid), value);
-  CHECK_GT(bucket_ranges_->range(mid + 1), value);
-  return mid;
-}
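-
-// Worked example (hypothetical ranges, for illustration only): with bucket
-// boundaries {0, 1, 10, 100}, GetBucketIndex(5) narrows [under, over) from
-// [0, 3) to [1, 3) to [1, 2) and returns 1, since range(1) <= 5 < range(2).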
-
-void SampleVectorBase::MoveSingleSampleToCounts() {
-  DCHECK(counts());
-
-  // Disable the single-sample since there is now counts storage for the data.
-  SingleSample sample = single_sample().Extract(/*disable=*/true);
-
-  // Stop here if there is no "count" as trying to find the bucket index of
-  // an invalid (including zero) "value" will crash.
-  if (sample.count == 0)
-    return;
-
-  // Move the value into storage. Sum and redundant-count already account
-  // for this entry so no need to call IncreaseSumAndCount().
-  subtle::NoBarrier_AtomicIncrement(&counts()[sample.bucket], sample.count);
-}
-
-void SampleVectorBase::MountCountsStorageAndMoveSingleSample() {
-  // There are many SampleVector objects and the lock is needed very
-  // infrequently (just when advancing from single-sample to multi-sample) so
-  // define a single, global lock that all can use. This lock only prevents
-  // concurrent entry into the code below; access and updates to |counts_|
-  // still require atomic operations.
-  static LazyInstance<Lock>::Leaky counts_lock = LAZY_INSTANCE_INITIALIZER;
-  if (subtle::NoBarrier_Load(&counts_) == 0) {
-    AutoLock lock(counts_lock.Get());
-    if (subtle::NoBarrier_Load(&counts_) == 0) {
-      // Create the actual counts storage while the above lock is acquired.
-      HistogramBase::Count* counts = CreateCountsStorageWhileLocked();
-      DCHECK(counts);
-
-      // Point |counts_| to the newly created storage. This is done while
-      // locked to prevent possible concurrent calls to CreateCountsStorage
-      // but, between that call and here, other threads could notice the
-      // existence of the storage and race with this to set_counts(). That's
-      // okay because (a) it's atomic and (b) it always writes the same value.
-      set_counts(counts);
-    }
-  }
-
-  // Move any single-sample into the newly mounted storage.
-  MoveSingleSampleToCounts();
-}
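-
-// Illustrative race (a sketch, not from this file): if threads A and B both
-// observe counts_ == 0 above, A wins the lock, creates the storage, and calls
-// set_counts(); B then acquires the lock, re-checks counts_ under the lock,
-// sees it is non-zero, and skips creation -- the classic double-checked
-// locking pattern.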
-
-SampleVector::SampleVector(const BucketRanges* bucket_ranges)
-    : SampleVector(0, bucket_ranges) {}
-
-SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges)
-    : SampleVectorBase(id, new LocalMetadata(), bucket_ranges) {}
-
-SampleVector::~SampleVector() {
-  delete static_cast<LocalMetadata*>(meta());
-}
-
-bool SampleVector::MountExistingCountsStorage() const {
-  // There is never any existing storage other than what is already in use.
-  return counts() != nullptr;
-}
-
-HistogramBase::AtomicCount* SampleVector::CreateCountsStorageWhileLocked() {
-  local_counts_.resize(counts_size());
-  return &local_counts_[0];
-}
-
-PersistentSampleVector::PersistentSampleVector(
-    uint64_t id,
-    const BucketRanges* bucket_ranges,
-    Metadata* meta,
-    const DelayedPersistentAllocation& counts)
-    : SampleVectorBase(id, meta, bucket_ranges), persistent_counts_(counts) {
-  // Only mount the full storage if the single-sample has been disabled.
-  // Otherwise, it is possible for this object instance to start using (empty)
-  // storage that was created incidentally while another instance continues to
-  // update the single sample. This "incidental creation" can happen because
-  // the memory is a DelayedPersistentAllocation which allows multiple memory
-  // blocks within it and applies an all-or-nothing approach to the allocation.
-  // Thus, a request elsewhere for one of the _other_ blocks would make _this_
-  // block available even though nothing has explicitly requested it.
-  //
-  // Note that it's not possible for the ctor to mount existing storage and
-  // move any single-sample to it because sometimes the persistent memory is
-  // read-only. Only non-const methods (which assume that memory is read/write)
-  // can do that.
-  if (single_sample().IsDisabled()) {
-    bool success = MountExistingCountsStorage();
-    DCHECK(success);
-  }
-}
-
-PersistentSampleVector::~PersistentSampleVector() = default;
-
-bool PersistentSampleVector::MountExistingCountsStorage() const {
-  // There is no early exit for the case where counts is already mounted
-  // because, given that this is a virtual function, it's more efficient to
-  // make that check at the call-site. There is no danger, however, should
-  // this get called anyway (perhaps because of a race condition) because at
-  // worst the |counts_| value would be over-written (in an atomic manner)
-  // with the exact same address.
-
-  if (!persistent_counts_.reference())
-    return false;  // Nothing to mount.
-
-  // Mount the counts array in position.
-  set_counts(
-      static_cast<HistogramBase::AtomicCount*>(persistent_counts_.Get()));
-
-  // The above shouldn't fail but can if the data is corrupt or incomplete.
-  return counts() != nullptr;
-}
-
-HistogramBase::AtomicCount*
-PersistentSampleVector::CreateCountsStorageWhileLocked() {
-  void* mem = persistent_counts_.Get();
-  if (!mem) {
-    // The above shouldn't fail but can if Bad Things(tm) are occurring in the
-    // persistent allocator. Crashing isn't a good option so instead just
-    // allocate something from the heap and return that. There will be no
-    // sharing or persistence but worse things are already happening.
-    return new HistogramBase::AtomicCount[counts_size()];
-  }
-
-  return static_cast<HistogramBase::AtomicCount*>(mem);
-}
-
-SampleVectorIterator::SampleVectorIterator(
-    const std::vector<HistogramBase::AtomicCount>* counts,
-    const BucketRanges* bucket_ranges)
-    : counts_(&(*counts)[0]),
-      counts_size_(counts->size()),
-      bucket_ranges_(bucket_ranges),
-      index_(0) {
-  DCHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
-  SkipEmptyBuckets();
-}
-
-SampleVectorIterator::SampleVectorIterator(
-    const HistogramBase::AtomicCount* counts,
-    size_t counts_size,
-    const BucketRanges* bucket_ranges)
-    : counts_(counts),
-      counts_size_(counts_size),
-      bucket_ranges_(bucket_ranges),
-      index_(0) {
-  DCHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
-  SkipEmptyBuckets();
-}
-
-SampleVectorIterator::~SampleVectorIterator() = default;
-
-bool SampleVectorIterator::Done() const {
-  return index_ >= counts_size_;
-}
-
-void SampleVectorIterator::Next() {
-  DCHECK(!Done());
-  index_++;
-  SkipEmptyBuckets();
-}
-
-void SampleVectorIterator::Get(HistogramBase::Sample* min,
-                               int64_t* max,
-                               HistogramBase::Count* count) const {
-  DCHECK(!Done());
-  if (min != nullptr)
-    *min = bucket_ranges_->range(index_);
-  if (max != nullptr)
-    *max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1));
-  if (count != nullptr)
-    *count = subtle::NoBarrier_Load(&counts_[index_]);
-}
-
-bool SampleVectorIterator::GetBucketIndex(size_t* index) const {
-  DCHECK(!Done());
-  if (index != nullptr)
-    *index = index_;
-  return true;
-}
-
-void SampleVectorIterator::SkipEmptyBuckets() {
-  if (Done())
-    return;
-
-  while (index_ < counts_size_) {
-    if (subtle::NoBarrier_Load(&counts_[index_]) != 0)
-      return;
-    index_++;
-  }
-}
-
-}  // namespace base
diff --git a/base/metrics/sample_vector.h b/base/metrics/sample_vector.h
deleted file mode 100644
index 278272d..0000000
--- a/base/metrics/sample_vector.h
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// SampleVector implements the HistogramSamples interface. It is used by all
-// Histogram-based classes to store samples.
-
-#ifndef BASE_METRICS_SAMPLE_VECTOR_H_
-#define BASE_METRICS_SAMPLE_VECTOR_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <memory>
-#include <vector>
-
-#include "base/atomicops.h"
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/metrics/bucket_ranges.h"
-#include "base/metrics/histogram_base.h"
-#include "base/metrics/histogram_samples.h"
-#include "base/metrics/persistent_memory_allocator.h"
-
-namespace base {
-
-class BucketRanges;
-
-class BASE_EXPORT SampleVectorBase : public HistogramSamples {
- public:
-  SampleVectorBase(uint64_t id,
-                   Metadata* meta,
-                   const BucketRanges* bucket_ranges);
-  ~SampleVectorBase() override;
-
-  // HistogramSamples:
-  void Accumulate(HistogramBase::Sample value,
-                  HistogramBase::Count count) override;
-  HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
-  HistogramBase::Count TotalCount() const override;
-  std::unique_ptr<SampleCountIterator> Iterator() const override;
-
-  // Get count of a specific bucket.
-  HistogramBase::Count GetCountAtIndex(size_t bucket_index) const;
-
-  // Access the bucket ranges held externally.
-  const BucketRanges* bucket_ranges() const { return bucket_ranges_; }
-
- protected:
-  bool AddSubtractImpl(
-      SampleCountIterator* iter,
-      HistogramSamples::Operator op) override;  // |op| is ADD or SUBTRACT.
-
-  virtual size_t GetBucketIndex(HistogramBase::Sample value) const;
-
-  // Moves the single-sample value to a mounted "counts" array.
-  void MoveSingleSampleToCounts();
-
-  // Mounts (creating if necessary) an array of "counts" for multi-value
-  // storage.
-  void MountCountsStorageAndMoveSingleSample();
-
-  // Mounts "counts" storage that already exists. This does not attempt to move
-  // any single-sample information to that storage as that would violate the
-  // "const" restriction that is often used to indicate read-only memory.
-  virtual bool MountExistingCountsStorage() const = 0;
-
-  // Creates "counts" storage and returns a pointer to it. Ownership of the
-  // array remains with the called method but will never change. This must be
-  // called while some sort of lock is held to prevent reentry.
-  virtual HistogramBase::Count* CreateCountsStorageWhileLocked() = 0;
-
-  HistogramBase::AtomicCount* counts() {
-    return reinterpret_cast<HistogramBase::AtomicCount*>(
-        subtle::Acquire_Load(&counts_));
-  }
-
-  const HistogramBase::AtomicCount* counts() const {
-    return reinterpret_cast<HistogramBase::AtomicCount*>(
-        subtle::Acquire_Load(&counts_));
-  }
-
-  void set_counts(const HistogramBase::AtomicCount* counts) const {
-    subtle::Release_Store(&counts_, reinterpret_cast<uintptr_t>(counts));
-  }
-
-  size_t counts_size() const { return bucket_ranges_->bucket_count(); }
-
- private:
-  friend class SampleVectorTest;
-  FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
-  FRIEND_TEST_ALL_PREFIXES(SharedHistogramTest, CorruptSampleCounts);
-
-  // |counts_| is actually a pointer to a HistogramBase::AtomicCount array but
-  // is held as an AtomicWord for concurrency reasons. When combined with the
-  // single_sample held in the metadata, there are four possible states:
-  //   1) single_sample == zero, counts_ == null
-  //   2) single_sample != zero, counts_ == null
-  //   3) single_sample != zero, counts_ != null BUT IS EMPTY
-  //   4) single_sample == zero, counts_ != null and may have data
-  // Once |counts_| is set, it can never revert and any existing single-sample
-  // must be moved to this storage. It is mutable because changing it doesn't
-  // change the (const) data but must adapt if a non-const object causes the
-  // storage to be allocated and updated.
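-  //
-  // For illustration (hypothetical sequence): a fresh object starts in state
-  // 1; the first Accumulate() moves it to state 2; mounting counts storage
-  // extracts the single-sample and yields state 4. State 3 arises only
-  // transiently, e.g. when read-only persistent storage is mounted before a
-  // writer has moved the single-sample.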
-  mutable subtle::AtomicWord counts_ = 0;
-
-  // Shares the same BucketRanges with Histogram object.
-  const BucketRanges* const bucket_ranges_;
-
-  DISALLOW_COPY_AND_ASSIGN(SampleVectorBase);
-};
-
-// A sample vector that uses local memory for the counts array.
-class BASE_EXPORT SampleVector : public SampleVectorBase {
- public:
-  explicit SampleVector(const BucketRanges* bucket_ranges);
-  SampleVector(uint64_t id, const BucketRanges* bucket_ranges);
-  ~SampleVector() override;
-
- private:
-  // SampleVectorBase:
-  bool MountExistingCountsStorage() const override;
-  HistogramBase::Count* CreateCountsStorageWhileLocked() override;
-
-  // Simple local storage for counts.
-  mutable std::vector<HistogramBase::AtomicCount> local_counts_;
-
-  DISALLOW_COPY_AND_ASSIGN(SampleVector);
-};
-
-// A sample vector that uses persistent memory for the counts array.
-class BASE_EXPORT PersistentSampleVector : public SampleVectorBase {
- public:
-  PersistentSampleVector(uint64_t id,
-                         const BucketRanges* bucket_ranges,
-                         Metadata* meta,
-                         const DelayedPersistentAllocation& counts);
-  ~PersistentSampleVector() override;
-
- private:
-  // SampleVectorBase:
-  bool MountExistingCountsStorage() const override;
-  HistogramBase::Count* CreateCountsStorageWhileLocked() override;
-
-  // Persistent storage for counts.
-  DelayedPersistentAllocation persistent_counts_;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentSampleVector);
-};
-
-// An iterator for sample vectors. This could be defined privately in the .cc
-// file but is here for easy testing.
-class BASE_EXPORT SampleVectorIterator : public SampleCountIterator {
- public:
-  SampleVectorIterator(const std::vector<HistogramBase::AtomicCount>* counts,
-                       const BucketRanges* bucket_ranges);
-  SampleVectorIterator(const HistogramBase::AtomicCount* counts,
-                       size_t counts_size,
-                       const BucketRanges* bucket_ranges);
-  ~SampleVectorIterator() override;
-
-  // SampleCountIterator implementation:
-  bool Done() const override;
-  void Next() override;
-  void Get(HistogramBase::Sample* min,
-           int64_t* max,
-           HistogramBase::Count* count) const override;
-
-  // SampleVector uses predefined buckets, so iterator can return bucket index.
-  bool GetBucketIndex(size_t* index) const override;
-
- private:
-  void SkipEmptyBuckets();
-
-  const HistogramBase::AtomicCount* counts_;
-  size_t counts_size_;
-  const BucketRanges* bucket_ranges_;
-
-  size_t index_;
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_SAMPLE_VECTOR_H_
diff --git a/base/metrics/single_sample_metrics.cc b/base/metrics/single_sample_metrics.cc
deleted file mode 100644
index 57c1c8f..0000000
--- a/base/metrics/single_sample_metrics.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/single_sample_metrics.h"
-
-#include "base/memory/ptr_util.h"
-#include "base/metrics/histogram.h"
-
-namespace base {
-
-static SingleSampleMetricsFactory* g_factory = nullptr;
-
-// static
-SingleSampleMetricsFactory* SingleSampleMetricsFactory::Get() {
-  if (!g_factory)
-    g_factory = new DefaultSingleSampleMetricsFactory();
-
-  return g_factory;
-}
-
-// static
-void SingleSampleMetricsFactory::SetFactory(
-    std::unique_ptr<SingleSampleMetricsFactory> factory) {
-  DCHECK(!g_factory);
-  g_factory = factory.release();
-}
-
-// static
-void SingleSampleMetricsFactory::DeleteFactoryForTesting() {
-  DCHECK(g_factory);
-  delete g_factory;
-  g_factory = nullptr;
-}
-
-std::unique_ptr<SingleSampleMetric>
-DefaultSingleSampleMetricsFactory::CreateCustomCountsMetric(
-    const std::string& histogram_name,
-    HistogramBase::Sample min,
-    HistogramBase::Sample max,
-    uint32_t bucket_count) {
-  return std::make_unique<DefaultSingleSampleMetric>(
-      histogram_name, min, max, bucket_count,
-      HistogramBase::kUmaTargetedHistogramFlag);
-}
-
-DefaultSingleSampleMetric::DefaultSingleSampleMetric(
-    const std::string& histogram_name,
-    HistogramBase::Sample min,
-    HistogramBase::Sample max,
-    uint32_t bucket_count,
-    int32_t flags)
-    : histogram_(Histogram::FactoryGet(histogram_name,
-                                       min,
-                                       max,
-                                       bucket_count,
-                                       flags)) {
-  // Bad construction parameters may lead to |histogram_| being null; DCHECK to
-  // find accidental errors in production. We must still handle the nullptr in
-  // destruction though since this construction may come from another untrusted
-  // process.
-  DCHECK(histogram_);
-}
-
-DefaultSingleSampleMetric::~DefaultSingleSampleMetric() {
-  // |histogram_| may be nullptr if bad construction parameters are given.
-  if (sample_ < 0 || !histogram_)
-    return;
-  histogram_->Add(sample_);
-}
-
-void DefaultSingleSampleMetric::SetSample(HistogramBase::Sample sample) {
-  DCHECK_GE(sample, 0);
-  sample_ = sample;
-}
-
-}  // namespace base
diff --git a/base/metrics/single_sample_metrics.h b/base/metrics/single_sample_metrics.h
deleted file mode 100644
index b966cb1..0000000
--- a/base/metrics/single_sample_metrics.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_SINGLE_SAMPLE_METRICS_H_
-#define BASE_METRICS_SINGLE_SAMPLE_METRICS_H_
-
-#include <string>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/metrics/histogram_base.h"
-
-namespace base {
-
-// See base/metrics/histogram.h for parameter definitions. Must only be used
-// and destroyed from the same thread as construction.
-class BASE_EXPORT SingleSampleMetric {
- public:
-  virtual ~SingleSampleMetric() = default;
-
-  virtual void SetSample(HistogramBase::Sample sample) = 0;
-};
-
-// Factory for creating single sample metrics. A single sample metric only
-// reports its sample once at destruction time. The sample may be changed prior
-// to destruction using the SetSample() method as many times as desired.
-//
-// The metric creation methods are safe to call from any thread; however, the
-// returned object must only be used and destroyed on the same thread as
-// construction.
-//
-// See base/metrics/histogram_macros.h for usage recommendations and
-// base/metrics/histogram.h for full parameter definitions.
-class BASE_EXPORT SingleSampleMetricsFactory {
- public:
-  virtual ~SingleSampleMetricsFactory() = default;
-
-  // Returns the factory provided by SetFactory(); if no factory has been set,
-  // a default factory is created and returned. Future calls to SetFactory()
-  // will fail if the default factory is ever vended.
-  static SingleSampleMetricsFactory* Get();
-  static void SetFactory(std::unique_ptr<SingleSampleMetricsFactory> factory);
-
-  // The factory normally persists until process shutdown, but in testing we
-  // should avoid leaking it since it sets a global.
-  static void DeleteFactoryForTesting();
-
-  // The methods below return a single sample metric for counts histograms; see
-  // method comments for the corresponding histogram macro.
-
-  // UMA_HISTOGRAM_CUSTOM_COUNTS()
-  virtual std::unique_ptr<SingleSampleMetric> CreateCustomCountsMetric(
-      const std::string& histogram_name,
-      HistogramBase::Sample min,
-      HistogramBase::Sample max,
-      uint32_t bucket_count) = 0;
-};
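-
-// Illustrative usage (a sketch; the histogram name and bounds are
-// hypothetical):
-//
-//   std::unique_ptr<SingleSampleMetric> metric =
-//       SingleSampleMetricsFactory::Get()->CreateCustomCountsMetric(
-//           "MyMetric.Duration", /*min=*/1, /*max=*/1000, /*bucket_count=*/50);
-//   metric->SetSample(250);  // May be called repeatedly.
-//   metric.reset();          // The last sample is reported at destruction.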
-
-// Default implementation for when no factory has been provided to the process.
-// In this case samples are recorded only within the current process, so they
-// will be lost in the event of sudden process termination.
-class BASE_EXPORT DefaultSingleSampleMetricsFactory
-    : public SingleSampleMetricsFactory {
- public:
-  DefaultSingleSampleMetricsFactory() = default;
-  ~DefaultSingleSampleMetricsFactory() override = default;
-
-  // SingleSampleMetricsFactory:
-  std::unique_ptr<SingleSampleMetric> CreateCustomCountsMetric(
-      const std::string& histogram_name,
-      HistogramBase::Sample min,
-      HistogramBase::Sample max,
-      uint32_t bucket_count) override;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DefaultSingleSampleMetricsFactory);
-};
-
-class BASE_EXPORT DefaultSingleSampleMetric : public SingleSampleMetric {
- public:
-  DefaultSingleSampleMetric(const std::string& histogram_name,
-                            HistogramBase::Sample min,
-                            HistogramBase::Sample max,
-                            uint32_t bucket_count,
-                            int32_t flags);
-  ~DefaultSingleSampleMetric() override;
-
-  // SingleSampleMetric:
-  void SetSample(HistogramBase::Sample sample) override;
-
- private:
-  HistogramBase* const histogram_;
-
-  // The last sample provided to SetSample(). We use -1 as a sentinel value to
-  // indicate no sample has been set.
-  HistogramBase::Sample sample_ = -1;
-
-  DISALLOW_COPY_AND_ASSIGN(DefaultSingleSampleMetric);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_SINGLE_SAMPLE_METRICS_H_
diff --git a/base/metrics/sparse_histogram.cc b/base/metrics/sparse_histogram.cc
deleted file mode 100644
index 30175a0..0000000
--- a/base/metrics/sparse_histogram.cc
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/sparse_histogram.h"
-
-#include <utility>
-
-#include "base/memory/ptr_util.h"
-#include "base/metrics/dummy_histogram.h"
-#include "base/metrics/metrics_hashes.h"
-#include "base/metrics/persistent_histogram_allocator.h"
-#include "base/metrics/persistent_sample_map.h"
-#include "base/metrics/sample_map.h"
-#include "base/metrics/statistics_recorder.h"
-#include "base/pickle.h"
-#include "base/strings/stringprintf.h"
-#include "base/synchronization/lock.h"
-
-namespace base {
-
-typedef HistogramBase::Count Count;
-typedef HistogramBase::Sample Sample;
-
-// static
-HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
-                                           int32_t flags) {
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  if (!histogram) {
-    // TODO(gayane): |HashMetricName| is called again in Histogram constructor.
-    // Refactor code to avoid the additional call.
-    bool should_record =
-        StatisticsRecorder::ShouldRecordHistogram(HashMetricName(name));
-    if (!should_record)
-      return DummyHistogram::GetInstance();
-    // Try to create the histogram using a "persistent" allocator. As of
-    // 2016-02-25, the availability of such is controlled by a base::Feature
-    // that is off by default. If the allocator doesn't exist or if
-    // allocating from it fails, code below will allocate the histogram from
-    // the process heap.
-    PersistentMemoryAllocator::Reference histogram_ref = 0;
-    std::unique_ptr<HistogramBase> tentative_histogram;
-    PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
-    if (allocator) {
-      tentative_histogram = allocator->AllocateHistogram(
-          SPARSE_HISTOGRAM, name, 0, 0, nullptr, flags, &histogram_ref);
-    }
-
-    // Handle the case where no persistent allocator is present or the
-    // persistent allocation fails (perhaps because it is full).
-    if (!tentative_histogram) {
-      DCHECK(!histogram_ref);  // Should never have been set.
-      DCHECK(!allocator);      // Shouldn't have failed.
-      flags &= ~HistogramBase::kIsPersistent;
-      tentative_histogram.reset(new SparseHistogram(GetPermanentName(name)));
-      tentative_histogram->SetFlags(flags);
-    }
-
-    // Register this histogram with the StatisticsRecorder. Keep a copy of
-    // the pointer value to tell later whether the locally created histogram
-    // was registered or deleted. The type is "void" because it could point
-    // to released memory after the following line.
-    const void* tentative_histogram_ptr = tentative_histogram.get();
-    histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
-        tentative_histogram.release());
-
-    // Persistent histograms need some follow-up processing.
-    if (histogram_ref) {
-      allocator->FinalizeHistogram(histogram_ref,
-                                   histogram == tentative_histogram_ptr);
-    }
-  }
-
-  CHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
-  return histogram;
-}
-
-// static
-std::unique_ptr<HistogramBase> SparseHistogram::PersistentCreate(
-    PersistentHistogramAllocator* allocator,
-    const char* name,
-    HistogramSamples::Metadata* meta,
-    HistogramSamples::Metadata* logged_meta) {
-  return WrapUnique(
-      new SparseHistogram(allocator, name, meta, logged_meta));
-}
-
-SparseHistogram::~SparseHistogram() = default;
-
-uint64_t SparseHistogram::name_hash() const {
-  return unlogged_samples_->id();
-}
-
-HistogramType SparseHistogram::GetHistogramType() const {
-  return SPARSE_HISTOGRAM;
-}
-
-bool SparseHistogram::HasConstructionArguments(
-    Sample expected_minimum,
-    Sample expected_maximum,
-    uint32_t expected_bucket_count) const {
-  // SparseHistogram never has min/max/bucket_count limits.
-  return false;
-}
-
-void SparseHistogram::Add(Sample value) {
-  AddCount(value, 1);
-}
-
-void SparseHistogram::AddCount(Sample value, int count) {
-  if (count <= 0) {
-    NOTREACHED();
-    return;
-  }
-  {
-    base::AutoLock auto_lock(lock_);
-    unlogged_samples_->Accumulate(value, count);
-  }
-
-  FindAndRunCallback(value);
-}
-
-std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotSamples() const {
-  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
-
-  base::AutoLock auto_lock(lock_);
-  snapshot->Add(*unlogged_samples_);
-  snapshot->Add(*logged_samples_);
-  return std::move(snapshot);
-}
-
-std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotDelta() {
-  DCHECK(!final_delta_created_);
-
-  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
-  base::AutoLock auto_lock(lock_);
-  snapshot->Add(*unlogged_samples_);
-
-  unlogged_samples_->Subtract(*snapshot);
-  logged_samples_->Add(*snapshot);
-  return std::move(snapshot);
-}
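-
-// Delta example (hypothetical counts, for illustration): if |unlogged_samples_|
-// holds {42: 3} and |logged_samples_| holds {42: 5}, SnapshotDelta() returns
-// {42: 3}, leaves |unlogged_samples_| empty, and advances |logged_samples_| to
-// {42: 8}; the next delta reports only samples added after this call.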
-
-std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotFinalDelta() const {
-  DCHECK(!final_delta_created_);
-  final_delta_created_ = true;
-
-  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
-  base::AutoLock auto_lock(lock_);
-  snapshot->Add(*unlogged_samples_);
-
-  return std::move(snapshot);
-}
-
-void SparseHistogram::AddSamples(const HistogramSamples& samples) {
-  base::AutoLock auto_lock(lock_);
-  unlogged_samples_->Add(samples);
-}
-
-bool SparseHistogram::AddSamplesFromPickle(PickleIterator* iter) {
-  base::AutoLock auto_lock(lock_);
-  return unlogged_samples_->AddFromPickle(iter);
-}
-
-void SparseHistogram::WriteHTMLGraph(std::string* output) const {
-  output->append("<PRE>");
-  WriteAsciiImpl(true, "<br>", output);
-  output->append("</PRE>");
-}
-
-void SparseHistogram::WriteAscii(std::string* output) const {
-  WriteAsciiImpl(true, "\n", output);
-}
-
-void SparseHistogram::SerializeInfoImpl(Pickle* pickle) const {
-  pickle->WriteString(histogram_name());
-  pickle->WriteInt(flags());
-}
-
-SparseHistogram::SparseHistogram(const char* name)
-    : HistogramBase(name),
-      unlogged_samples_(new SampleMap(HashMetricName(name))),
-      logged_samples_(new SampleMap(unlogged_samples_->id())) {}
-
-SparseHistogram::SparseHistogram(PersistentHistogramAllocator* allocator,
-                                 const char* name,
-                                 HistogramSamples::Metadata* meta,
-                                 HistogramSamples::Metadata* logged_meta)
-    : HistogramBase(name),
-      // While other histogram types maintain a static vector of values with
-      // sufficient space for both "active" and "logged" samples, with each
-      // SampleVector being given the appropriate half, sparse histograms
-      // have no such initial allocation. Each sample has its own record
-      // attached to a single PersistentSampleMap by a common 64-bit identifier.
-      // Since a sparse histogram has two sample maps (active and logged),
-      // there must be two sets of sample records with different IDs. The
-      // "active" samples use, for convenience purposes, an ID matching
-      // that of the histogram while the "logged" samples use that number
-      // plus 1.
-      unlogged_samples_(
-          new PersistentSampleMap(HashMetricName(name), allocator, meta)),
-      logged_samples_(new PersistentSampleMap(unlogged_samples_->id() + 1,
-                                              allocator,
-                                              logged_meta)) {}
-
-HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
-  std::string histogram_name;
-  int flags;
-  if (!iter->ReadString(&histogram_name) || !iter->ReadInt(&flags)) {
-    DLOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name;
-    return nullptr;
-  }
-
-  flags &= ~HistogramBase::kIPCSerializationSourceFlag;
-
-  return SparseHistogram::FactoryGet(histogram_name, flags);
-}
-
-void SparseHistogram::GetParameters(DictionaryValue* params) const {
-  // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
-}
-
-void SparseHistogram::GetCountAndBucketData(Count* count,
-                                            int64_t* sum,
-                                            ListValue* buckets) const {
-  // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
-}
-
-void SparseHistogram::WriteAsciiImpl(bool graph_it,
-                                     const std::string& newline,
-                                     std::string* output) const {
-  // Get a local copy of the data so we are consistent.
-  std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples();
-  Count total_count = snapshot->TotalCount();
-  double scaled_total_count = total_count / 100.0;
-
-  WriteAsciiHeader(total_count, output);
-  output->append(newline);
-
-  // Determine how wide the largest bucket range is (how many digits to print),
-  // so that we'll be able to right-align starts for the graphical bars.
-  // Determine which bucket has the largest sample count so that we can
-  // normalize the graphical bar-width relative to that sample count.
-  Count largest_count = 0;
-  Sample largest_sample = 0;
-  std::unique_ptr<SampleCountIterator> it = snapshot->Iterator();
-  while (!it->Done()) {
-    Sample min;
-    int64_t max;
-    Count count;
-    it->Get(&min, &max, &count);
-    if (min > largest_sample)
-      largest_sample = min;
-    if (count > largest_count)
-      largest_count = count;
-    it->Next();
-  }
-  size_t print_width = GetSimpleAsciiBucketRange(largest_sample).size() + 1;
-
-  // Iterate over each item and display it.
-  it = snapshot->Iterator();
-  while (!it->Done()) {
-    Sample min;
-    int64_t max;
-    Count count;
-    it->Get(&min, &max, &count);
-
-    // value is min, so display it
-    std::string range = GetSimpleAsciiBucketRange(min);
-    output->append(range);
-    for (size_t j = 0; range.size() + j < print_width + 1; ++j)
-      output->push_back(' ');
-
-    if (graph_it)
-      WriteAsciiBucketGraph(count, largest_count, output);
-    WriteAsciiBucketValue(count, scaled_total_count, output);
-    output->append(newline);
-    it->Next();
-  }
-}
-
-void SparseHistogram::WriteAsciiHeader(const Count total_count,
-                                       std::string* output) const {
-  StringAppendF(output, "Histogram: %s recorded %d samples", histogram_name(),
-                total_count);
-  if (flags())
-    StringAppendF(output, " (flags = 0x%x)", flags());
-}
-
-}  // namespace base
diff --git a/base/metrics/sparse_histogram.h b/base/metrics/sparse_histogram.h
deleted file mode 100644
index 913762c..0000000
--- a/base/metrics/sparse_histogram.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_SPARSE_HISTOGRAM_H_
-#define BASE_METRICS_SPARSE_HISTOGRAM_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <map>
-#include <memory>
-#include <string>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/metrics/histogram_base.h"
-#include "base/metrics/histogram_samples.h"
-#include "base/synchronization/lock.h"
-
-namespace base {
-
-class HistogramSamples;
-class PersistentHistogramAllocator;
-class Pickle;
-class PickleIterator;
-
-class BASE_EXPORT SparseHistogram : public HistogramBase {
- public:
-  // If there's one with the same name, return the existing one. If not,
-  // create a new one.
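-  //
-  // Illustrative usage (a sketch; the name and flag are hypothetical):
-  //
-  //   HistogramBase* h = SparseHistogram::FactoryGet(
-  //       "MyMetric.ErrorCode", HistogramBase::kUmaTargetedHistogramFlag);
-  //   h->Add(404);  // Each distinct value gets its own size-1 bucket.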
-  static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
-
-  // Create a histogram using data in persistent storage. The allocator must
-  // live longer than the created sparse histogram.
-  static std::unique_ptr<HistogramBase> PersistentCreate(
-      PersistentHistogramAllocator* allocator,
-      const char* name,
-      HistogramSamples::Metadata* meta,
-      HistogramSamples::Metadata* logged_meta);
-
-  ~SparseHistogram() override;
-
-  // HistogramBase implementation:
-  uint64_t name_hash() const override;
-  HistogramType GetHistogramType() const override;
-  bool HasConstructionArguments(Sample expected_minimum,
-                                Sample expected_maximum,
-                                uint32_t expected_bucket_count) const override;
-  void Add(Sample value) override;
-  void AddCount(Sample value, int count) override;
-  void AddSamples(const HistogramSamples& samples) override;
-  bool AddSamplesFromPickle(base::PickleIterator* iter) override;
-  std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
-  std::unique_ptr<HistogramSamples> SnapshotDelta() override;
-  std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
-  void WriteHTMLGraph(std::string* output) const override;
-  void WriteAscii(std::string* output) const override;
-
- protected:
-  // HistogramBase implementation:
-  void SerializeInfoImpl(base::Pickle* pickle) const override;
-
- private:
-  // Clients should always use FactoryGet to create SparseHistogram.
-  explicit SparseHistogram(const char* name);
-
-  SparseHistogram(PersistentHistogramAllocator* allocator,
-                  const char* name,
-                  HistogramSamples::Metadata* meta,
-                  HistogramSamples::Metadata* logged_meta);
-
-  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
-      base::PickleIterator* iter);
-  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
-
-  void GetParameters(DictionaryValue* params) const override;
-  void GetCountAndBucketData(Count* count,
-                             int64_t* sum,
-                             ListValue* buckets) const override;
-
-  // Helpers for emitting ASCII graphics. Each method appends data to output.
-  void WriteAsciiImpl(bool graph_it,
-                      const std::string& newline,
-                      std::string* output) const;
-
-  // Write a common header message describing this histogram.
-  void WriteAsciiHeader(const Count total_count,
-                        std::string* output) const;
-
-  // For calling the constructor.
-  friend class SparseHistogramTest;
-
-  // Protects access to |unlogged_samples_| and |logged_samples_|.
-  mutable base::Lock lock_;
-
-  // Flag to indicate if SnapshotFinalDelta has been previously called.
-  mutable bool final_delta_created_ = false;
-
-  std::unique_ptr<HistogramSamples> unlogged_samples_;
-  std::unique_ptr<HistogramSamples> logged_samples_;
-
-  DISALLOW_COPY_AND_ASSIGN(SparseHistogram);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_SPARSE_HISTOGRAM_H_
diff --git a/base/metrics/statistics_recorder.cc b/base/metrics/statistics_recorder.cc
deleted file mode 100644
index 28773a1..0000000
--- a/base/metrics/statistics_recorder.cc
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/statistics_recorder.h"
-
-#include <memory>
-
-#include "base/at_exit.h"
-#include "base/debug/leak_annotations.h"
-#include "base/json/string_escape.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/metrics/histogram.h"
-#include "base/metrics/histogram_snapshot_manager.h"
-#include "base/metrics/metrics_hashes.h"
-#include "base/metrics/persistent_histogram_allocator.h"
-#include "base/metrics/record_histogram_checker.h"
-#include "base/stl_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/values.h"
-
-namespace base {
-namespace {
-
-bool HistogramNameLesser(const base::HistogramBase* a,
-                         const base::HistogramBase* b) {
-  return strcmp(a->histogram_name(), b->histogram_name()) < 0;
-}
-
-}  // namespace
-
-// static
-LazyInstance<Lock>::Leaky StatisticsRecorder::lock_;
-
-// static
-StatisticsRecorder* StatisticsRecorder::top_ = nullptr;
-
-// static
-bool StatisticsRecorder::is_vlog_initialized_ = false;
-
-size_t StatisticsRecorder::BucketRangesHash::operator()(
-    const BucketRanges* const a) const {
-  return a->checksum();
-}
-
-bool StatisticsRecorder::BucketRangesEqual::operator()(
-    const BucketRanges* const a,
-    const BucketRanges* const b) const {
-  return a->Equals(b);
-}
-
-StatisticsRecorder::~StatisticsRecorder() {
-  const AutoLock auto_lock(lock_.Get());
-  DCHECK_EQ(this, top_);
-  top_ = previous_;
-}
-
-// static
-void StatisticsRecorder::EnsureGlobalRecorderWhileLocked() {
-  lock_.Get().AssertAcquired();
-  if (top_)
-    return;
-
-  const StatisticsRecorder* const p = new StatisticsRecorder;
-  // The global recorder is never deleted.
-  ANNOTATE_LEAKING_OBJECT_PTR(p);
-  DCHECK_EQ(p, top_);
-}
-
-// static
-void StatisticsRecorder::RegisterHistogramProvider(
-    const WeakPtr<HistogramProvider>& provider) {
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-  top_->providers_.push_back(provider);
-}
-
-// static
-HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
-    HistogramBase* histogram) {
-  // Declared before |auto_lock| to ensure correct destruction order.
-  std::unique_ptr<HistogramBase> histogram_deleter;
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-
-  const char* const name = histogram->histogram_name();
-  HistogramBase*& registered = top_->histograms_[name];
-
-  if (!registered) {
-    // |name| is guaranteed to never change or be deallocated so long
-    // as the histogram is alive (which is forever).
-    registered = histogram;
-    ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
-    // If there are callbacks for this histogram, we set the kCallbackExists
-    // flag.
-    const auto callback_iterator = top_->callbacks_.find(name);
-    if (callback_iterator != top_->callbacks_.end()) {
-      if (!callback_iterator->second.is_null())
-        histogram->SetFlags(HistogramBase::kCallbackExists);
-      else
-        histogram->ClearFlags(HistogramBase::kCallbackExists);
-    }
-    return histogram;
-  }
-
-  if (histogram == registered) {
-    // The histogram was registered before.
-    return histogram;
-  }
-
-  // We already have one histogram with this name.
-  histogram_deleter.reset(histogram);
-  return registered;
-}
-
-// static
-const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
-    const BucketRanges* ranges) {
-  DCHECK(ranges->HasValidChecksum());
-
-  // Declared before |auto_lock| to ensure correct destruction order.
-  std::unique_ptr<const BucketRanges> ranges_deleter;
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-
-  const BucketRanges* const registered = *top_->ranges_.insert(ranges).first;
-  if (registered == ranges) {
-    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
-  } else {
-    ranges_deleter.reset(ranges);
-  }
-
-  return registered;
-}
-
-// static
-void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
-                                        std::string* output) {
-  for (const HistogramBase* const histogram :
-       Sort(WithName(GetHistograms(), query))) {
-    histogram->WriteHTMLGraph(output);
-    *output += "<br><hr><br>";
-  }
-}
-
-// static
-void StatisticsRecorder::WriteGraph(const std::string& query,
-                                    std::string* output) {
-  if (query.length())
-    StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
-  else
-    output->append("Collections of all histograms\n");
-
-  for (const HistogramBase* const histogram :
-       Sort(WithName(GetHistograms(), query))) {
-    histogram->WriteAscii(output);
-    output->append("\n");
-  }
-}
-
-// static
-std::string StatisticsRecorder::ToJSON(JSONVerbosityLevel verbosity_level) {
-  std::string output = "{\"histograms\":[";
-  const char* sep = "";
-  for (const HistogramBase* const histogram : Sort(GetHistograms())) {
-    output += sep;
-    sep = ",";
-    std::string json;
-    histogram->WriteJSON(&json, verbosity_level);
-    output += json;
-  }
-  output += "]}";
-  return output;
-}
-
-// static
-std::vector<const BucketRanges*> StatisticsRecorder::GetBucketRanges() {
-  std::vector<const BucketRanges*> out;
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-  out.reserve(top_->ranges_.size());
-  out.assign(top_->ranges_.begin(), top_->ranges_.end());
-  return out;
-}
-
-// static
-HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
-  // This must be called *before* the lock is acquired below because it will
-  // call back into this object to register histograms. Those called methods
-  // will acquire the lock at that time.
-  ImportGlobalPersistentHistograms();
-
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-
-  const HistogramMap::const_iterator it = top_->histograms_.find(name);
-  return it != top_->histograms_.end() ? it->second : nullptr;
-}
-
-// static
-StatisticsRecorder::HistogramProviders
-StatisticsRecorder::GetHistogramProviders() {
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-  return top_->providers_;
-}
-
-// static
-void StatisticsRecorder::ImportProvidedHistograms() {
-  // Merge histogram data from each provider in turn.
-  for (const WeakPtr<HistogramProvider>& provider : GetHistogramProviders()) {
-    // The weak pointer may be invalid if the provider was destroyed, though
-    // providers generally never are.
-    if (provider)
-      provider->MergeHistogramDeltas();
-  }
-}
-
-// static
-void StatisticsRecorder::PrepareDeltas(
-    bool include_persistent,
-    HistogramBase::Flags flags_to_set,
-    HistogramBase::Flags required_flags,
-    HistogramSnapshotManager* snapshot_manager) {
-  Histograms histograms = GetHistograms();
-  if (!include_persistent)
-    histograms = NonPersistent(std::move(histograms));
-  snapshot_manager->PrepareDeltas(Sort(std::move(histograms)), flags_to_set,
-                                  required_flags);
-}
-
-// static
-void StatisticsRecorder::InitLogOnShutdown() {
-  const AutoLock auto_lock(lock_.Get());
-  InitLogOnShutdownWhileLocked();
-}
-
-// static
-bool StatisticsRecorder::SetCallback(
-    const std::string& name,
-    const StatisticsRecorder::OnSampleCallback& cb) {
-  DCHECK(!cb.is_null());
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-
-  if (!top_->callbacks_.insert({name, cb}).second)
-    return false;
-
-  const HistogramMap::const_iterator it = top_->histograms_.find(name);
-  if (it != top_->histograms_.end())
-    it->second->SetFlags(HistogramBase::kCallbackExists);
-
-  return true;
-}
-
-// static
-void StatisticsRecorder::ClearCallback(const std::string& name) {
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-
-  top_->callbacks_.erase(name);
-
-  // We also clear the flag from the histogram (if it exists).
-  const HistogramMap::const_iterator it = top_->histograms_.find(name);
-  if (it != top_->histograms_.end())
-    it->second->ClearFlags(HistogramBase::kCallbackExists);
-}
-
-// static
-StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
-    const std::string& name) {
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-  const auto it = top_->callbacks_.find(name);
-  return it != top_->callbacks_.end() ? it->second : OnSampleCallback();
-}
-
-// static
-size_t StatisticsRecorder::GetHistogramCount() {
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-  return top_->histograms_.size();
-}
-
-// static
-void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-
-  const HistogramMap::iterator found = top_->histograms_.find(name);
-  if (found == top_->histograms_.end())
-    return;
-
-  HistogramBase* const base = found->second;
-  if (base->GetHistogramType() != SPARSE_HISTOGRAM) {
-    // When forgetting a histogram, it's likely that other information is
-    // also becoming invalid. Clear the persistent reference that may no
-    // longer be valid. There's no danger in this as, at worst, duplicates
-    // will be created in persistent memory.
-    static_cast<Histogram*>(base)->bucket_ranges()->set_persistent_reference(0);
-  }
-
-  top_->histograms_.erase(found);
-}
-
-// static
-std::unique_ptr<StatisticsRecorder>
-StatisticsRecorder::CreateTemporaryForTesting() {
-  const AutoLock auto_lock(lock_.Get());
-  return WrapUnique(new StatisticsRecorder());
-}
-
-// static
-void StatisticsRecorder::SetRecordChecker(
-    std::unique_ptr<RecordHistogramChecker> record_checker) {
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-  top_->record_checker_ = std::move(record_checker);
-}
-
-// static
-bool StatisticsRecorder::ShouldRecordHistogram(uint64_t histogram_hash) {
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-  return !top_->record_checker_ ||
-         top_->record_checker_->ShouldRecord(histogram_hash);
-}
-
-// static
-StatisticsRecorder::Histograms StatisticsRecorder::GetHistograms() {
-  // This must be called *before* the lock is acquired below because it will
-  // call back into this object to register histograms. Those called methods
-  // will acquire the lock at that time.
-  ImportGlobalPersistentHistograms();
-
-  Histograms out;
-
-  const AutoLock auto_lock(lock_.Get());
-  EnsureGlobalRecorderWhileLocked();
-
-  out.reserve(top_->histograms_.size());
-  for (const auto& entry : top_->histograms_)
-    out.push_back(entry.second);
-
-  return out;
-}
-
-// static
-StatisticsRecorder::Histograms StatisticsRecorder::Sort(Histograms histograms) {
-  std::sort(histograms.begin(), histograms.end(), &HistogramNameLesser);
-  return histograms;
-}
-
-// static
-StatisticsRecorder::Histograms StatisticsRecorder::WithName(
-    Histograms histograms,
-    const std::string& query) {
-  // Need a C-string query for comparisons against C-string histogram name.
-  const char* const query_string = query.c_str();
-  histograms.erase(std::remove_if(histograms.begin(), histograms.end(),
-                                  [query_string](const HistogramBase* const h) {
-                                    return !strstr(h->histogram_name(),
-                                                   query_string);
-                                  }),
-                   histograms.end());
-  return histograms;
-}
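-
-// Matching example (hypothetical names, for illustration): because WithName()
-// uses strstr(), the query "Memory." keeps both "Memory.HeapSize" and
-// "Total.Memory.Usage" -- it is a substring match, not a prefix match.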
-
-// static
-StatisticsRecorder::Histograms StatisticsRecorder::NonPersistent(
-    Histograms histograms) {
-  histograms.erase(
-      std::remove_if(histograms.begin(), histograms.end(),
-                     [](const HistogramBase* const h) {
-                       return (h->flags() & HistogramBase::kIsPersistent) != 0;
-                     }),
-      histograms.end());
-  return histograms;
-}
-
-// static
-void StatisticsRecorder::ImportGlobalPersistentHistograms() {
-  // Import histograms from known persistent storage. Histograms could have been
-  // added by other processes and they must be fetched and recognized locally.
-  // If the persistent memory segment is not shared between processes, this call
-  // does nothing.
-  if (GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get())
-    allocator->ImportHistogramsToStatisticsRecorder();
-}
-
-// This singleton instance should be started during the single-threaded portion
-// of main(), and hence it is not thread safe. It initializes globals to provide
-// support for all future calls.
-StatisticsRecorder::StatisticsRecorder() {
-  lock_.Get().AssertAcquired();
-  previous_ = top_;
-  top_ = this;
-  InitLogOnShutdownWhileLocked();
-}
-
-// static
-void StatisticsRecorder::InitLogOnShutdownWhileLocked() {
-  lock_.Get().AssertAcquired();
-  if (!is_vlog_initialized_ && VLOG_IS_ON(1)) {
-    is_vlog_initialized_ = true;
-    const auto dump_to_vlog = [](void*) {
-      std::string output;
-      WriteGraph("", &output);
-      VLOG(1) << output;
-    };
-    AtExitManager::RegisterCallback(dump_to_vlog, nullptr);
-  }
-}
-
-}  // namespace base
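
For reference, the StatisticsRecorder API removed here was typically exercised in
tests like the following minimal sketch. It is grounded in the declarations in
the deleted header below; the test name and histogram parameters are
illustrative.

    #include <memory>

    #include "base/metrics/histogram.h"
    #include "base/metrics/statistics_recorder.h"
    #include "testing/gtest/include/gtest/gtest.h"

    TEST(StatisticsRecorderExample, TemporaryRecorderScopesRegistration) {
      // Push aside the global recorder; histograms registered from here on go
      // into the temporary one.
      std::unique_ptr<base::StatisticsRecorder> temporary =
          base::StatisticsRecorder::CreateTemporaryForTesting();

      base::HistogramBase* histogram = base::Histogram::FactoryGet(
          "Example.Histogram", /*minimum=*/1, /*maximum=*/100,
          /*bucket_count=*/10, base::HistogramBase::kNoFlags);
      histogram->Add(42);

      EXPECT_EQ(histogram,
                base::StatisticsRecorder::FindHistogram("Example.Histogram"));

      // Deleting |temporary| restores the previous global recorder, per the
      // destructor contract documented in the header.
      temporary.reset();
    }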
diff --git a/base/metrics/statistics_recorder.h b/base/metrics/statistics_recorder.h
deleted file mode 100644
index 87a9311..0000000
--- a/base/metrics/statistics_recorder.h
+++ /dev/null
@@ -1,302 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// StatisticsRecorder holds all Histograms and BucketRanges that are used by
-// Histograms in the system. It provides a general place for
-// Histograms/BucketRanges to register, and supports a global API for accessing
-// (e.g., dumping or graphing) the data.
-
-#ifndef BASE_METRICS_STATISTICS_RECORDER_H_
-#define BASE_METRICS_STATISTICS_RECORDER_H_
-
-#include <stdint.h>
-
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/gtest_prod_util.h"
-#include "base/lazy_instance.h"
-#include "base/macros.h"
-#include "base/memory/weak_ptr.h"
-#include "base/metrics/histogram_base.h"
-#include "base/metrics/record_histogram_checker.h"
-#include "base/strings/string_piece.h"
-#include "base/synchronization/lock.h"
-
-namespace base {
-
-class BucketRanges;
-class HistogramSnapshotManager;
-
-// In-memory recorder of usage statistics (aka metrics, aka histograms).
-//
-// All the public methods are static and act on a global recorder. This global
-// recorder is internally synchronized and all the static methods are thread
-// safe.
-//
-// StatisticsRecorder doesn't have a public constructor. For testing purposes,
-// you can create a temporary recorder using the factory method
-// CreateTemporaryForTesting(). This temporary recorder becomes the global one
-// until deleted. When this temporary recorder is deleted, it restores the
-// previous global one.
-class BASE_EXPORT StatisticsRecorder {
- public:
-  // An interface class that allows the StatisticsRecorder to forcibly merge
-  // histograms from providers when necessary.
-  class HistogramProvider {
-   public:
-    // Merges all histogram information into the global versions.
-    virtual void MergeHistogramDeltas() = 0;
-  };
-
-  typedef std::vector<HistogramBase*> Histograms;
-
-  // Restores the previous global recorder.
-  //
-  // When several temporary recorders are created using
-  // CreateTemporaryForTesting(), these recorders must be deleted in reverse
-  // order of creation.
-  //
-  // This method is thread safe.
-  //
-  // Precondition: The recorder being deleted is the current global recorder.
-  ~StatisticsRecorder();
-
-  // Registers a provider of histograms that can be called to merge those into
-  // the global recorder. Calls to ImportProvidedHistograms() will fetch from
-  // registered providers.
-  //
-  // This method is thread safe.
-  static void RegisterHistogramProvider(
-      const WeakPtr<HistogramProvider>& provider);
-
-  // Registers or adds a new histogram to the collection of statistics. If an
-  // identically named histogram is already registered, then the argument
-  // |histogram| will be deleted. The returned value is always the registered
-  // histogram (either the argument, or the pre-existing registered histogram).
-  //
-  // This method is thread safe.
-  static HistogramBase* RegisterOrDeleteDuplicate(HistogramBase* histogram);
-
-  // Registers or adds a new BucketRanges. If an equivalent BucketRanges is
-  // already registered, then the argument |ranges| will be deleted. The
-  // returned value is always the registered BucketRanges (either the argument,
-  // or the pre-existing one).
-  //
-  // This method is thread safe.
-  static const BucketRanges* RegisterOrDeleteDuplicateRanges(
-      const BucketRanges* ranges);
-
-  // Methods for appending histogram data to a string. Only histograms whose
-  // names contain |query| as a substring are written to |output| (an empty
-  // query processes all registered histograms).
-  //
-  // These methods are thread safe.
-  static void WriteHTMLGraph(const std::string& query, std::string* output);
-  static void WriteGraph(const std::string& query, std::string* output);
-
-  // Returns all histograms serialized as JSON, with |verbosity_level|
-  // controlling the serialization verbosity.
-  //
-  // This method is thread safe.
-  static std::string ToJSON(JSONVerbosityLevel verbosity_level);
-
-  // Gets existing histograms.
-  //
-  // The order of returned histograms is not guaranteed.
-  //
-  // Ownership of the individual histograms remains with the StatisticsRecorder.
-  //
-  // This method is thread safe.
-  static Histograms GetHistograms();
-
-  // Gets the BucketRanges used by all registered histograms. The order of
-  // returned BucketRanges is not guaranteed.
-  //
-  // This method is thread safe.
-  static std::vector<const BucketRanges*> GetBucketRanges();
-
-  // Finds a histogram by name. Matches the exact name. Returns a null pointer
-  // if a matching histogram is not found.
-  //
-  // This method is thread safe.
-  static HistogramBase* FindHistogram(base::StringPiece name);
-
-  // Imports histograms from providers.
-  //
-  // This method must be called on the UI thread.
-  static void ImportProvidedHistograms();
-
-  // Snapshots all histograms via |snapshot_manager|. |include_persistent|
-  // determines whether histograms held in persistent memory are included.
-  // |flags_to_set| is used to set flags on each histogram. |required_flags|
-  // selects which histograms are recorded: only histograms that have all of
-  // those flags are chosen. To record all histograms, pass
-  // |Histogram::kNoFlags|.
-  static void PrepareDeltas(bool include_persistent,
-                            HistogramBase::Flags flags_to_set,
-                            HistogramBase::Flags required_flags,
-                            HistogramSnapshotManager* snapshot_manager);
-
-  typedef base::Callback<void(HistogramBase::Sample)> OnSampleCallback;
-
-  // Sets the callback to notify when a new sample is recorded on the histogram
-  // referred to by |histogram_name|. Can be called before or after the
-  // histogram is created. Returns whether the callback was successfully set.
-  //
-  // This method is thread safe.
-  static bool SetCallback(const std::string& histogram_name,
-                          const OnSampleCallback& callback);
-
-  // Clears any callback set on the histogram referred to by |histogram_name|.
-  //
-  // This method is thread safe.
-  static void ClearCallback(const std::string& histogram_name);
-
-  // Retrieves the callback for the histogram referred to by |histogram_name|,
-  // or a null callback if no callback exists for this histogram.
-  //
-  // This method is thread safe.
-  static OnSampleCallback FindCallback(const std::string& histogram_name);
-
-  // Returns the number of known histograms.
-  //
-  // This method is thread safe.
-  static size_t GetHistogramCount();
-
-  // Initializes histogram logging when running with --v=1. Safe to call
-  // multiple times. This is called from the constructor, but for the browser
-  // process it is more useful to start logging after the StatisticsRecorder
-  // exists, so log-on-shutdown initialization can also happen later.
-  //
-  // This method is thread safe.
-  static void InitLogOnShutdown();
-
-  // Removes a histogram from the internal set of known ones. This can be
-  // necessary when testing persistent histograms whose underlying memory is
-  // being released.
-  //
-  // This method is thread safe.
-  static void ForgetHistogramForTesting(base::StringPiece name);
-
-  // Creates a temporary StatisticsRecorder object for testing purposes. All new
-  // histograms will be registered in it until it is destructed or pushed aside
-  // for the lifetime of yet another StatisticsRecorder object. The destruction
-  // of the returned object will re-activate the previous one.
-  // StatisticsRecorder objects must be deleted in the reverse order of their
-  // creation.
-  //
-  // This method is thread safe.
-  static std::unique_ptr<StatisticsRecorder> CreateTemporaryForTesting()
-      WARN_UNUSED_RESULT;
-
-  // Sets the record checker for determining if a histogram should be recorded.
-  // The record checker doesn't affect histograms that were already recorded,
-  // so this method must be called very early, before any threads have
-  // started. Record checker methods can be called on any thread, so they
-  // must not mutate any state.
-  static void SetRecordChecker(
-      std::unique_ptr<RecordHistogramChecker> record_checker);
-
-  // Checks if the given histogram should be recorded based on the
-  // ShouldRecord() method of the record checker. If the record checker is not
-  // set, returns true.
-  //
-  // This method is thread safe.
-  static bool ShouldRecordHistogram(uint64_t histogram_hash);
-
-  // Sorts histograms by name.
-  static Histograms Sort(Histograms histograms);
-
-  // Filters histograms by name. Only histograms which have |query| as a
-  // substring in their name are kept. An empty query keeps all histograms.
-  static Histograms WithName(Histograms histograms, const std::string& query);
-
-  // Filters histograms by persistency. Only non-persistent histograms are kept.
-  static Histograms NonPersistent(Histograms histograms);
-
- private:
-  typedef std::vector<WeakPtr<HistogramProvider>> HistogramProviders;
-
-  typedef std::unordered_map<StringPiece, HistogramBase*, StringPieceHash>
-      HistogramMap;
-
-  // We keep a map from histogram names to callbacks, so that the callback can
-  // be attached as soon as a matching histogram is created.
-  typedef std::unordered_map<std::string, OnSampleCallback> CallbackMap;
-
-  struct BucketRangesHash {
-    size_t operator()(const BucketRanges* a) const;
-  };
-
-  struct BucketRangesEqual {
-    bool operator()(const BucketRanges* a, const BucketRanges* b) const;
-  };
-
-  typedef std::
-      unordered_set<const BucketRanges*, BucketRangesHash, BucketRangesEqual>
-          RangesMap;
-
-  friend class StatisticsRecorderTest;
-  FRIEND_TEST_ALL_PREFIXES(StatisticsRecorderTest, IterationTest);
-
-  // Initializes the global recorder if it doesn't already exist. Safe to call
-  // multiple times.
-  //
-  // Precondition: The global lock is already acquired.
-  static void EnsureGlobalRecorderWhileLocked();
-
-  // Gets histogram providers.
-  //
-  // This method is thread safe.
-  static HistogramProviders GetHistogramProviders();
-
-  // Imports histograms from global persistent memory.
-  //
-  // Precondition: The global lock must not be held during this call.
-  static void ImportGlobalPersistentHistograms();
-
-  // Constructs a new StatisticsRecorder and sets it as the current global
-  // recorder.
-  //
-  // Precondition: The global lock is already acquired.
-  StatisticsRecorder();
-
-  // Implementation of InitLogOnShutdown() that does not itself acquire the
-  // lock. Callers must guard the StatisticsRecorder themselves if needed
-  // (unit tests don't).
-  //
-  // Precondition: The global lock is already acquired.
-  static void InitLogOnShutdownWhileLocked();
-
-  HistogramMap histograms_;
-  CallbackMap callbacks_;
-  RangesMap ranges_;
-  HistogramProviders providers_;
-  std::unique_ptr<RecordHistogramChecker> record_checker_;
-
-  // Previous global recorder that existed when this one was created.
-  StatisticsRecorder* previous_ = nullptr;
-
-  // Global lock for internal synchronization.
-  static LazyInstance<Lock>::Leaky lock_;
-
-  // Current global recorder. This recorder is used by static methods. When a
-  // new global recorder is created by CreateTemporaryForTesting(), then the
-  // previous global recorder is referenced by top_->previous_.
-  static StatisticsRecorder* top_;
-
-  // Tracks whether InitLogOnShutdownWhileLocked() has registered a logging
-  // function that will be called when the program finishes.
-  static bool is_vlog_initialized_;
-
-  DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_STATISTICS_RECORDER_H_
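
The SetRecordChecker() hook declared above takes a RecordHistogramChecker; a
conforming implementation looks roughly like this. The sketch assumes the
interface exposes a single const ShouldRecord(uint64_t) method, matching the
call site in statistics_recorder.cc; the allow-list policy is illustrative.

    #include <cstdint>
    #include <memory>
    #include <unordered_set>
    #include <utility>

    #include "base/metrics/record_histogram_checker.h"
    #include "base/metrics/statistics_recorder.h"

    class AllowListChecker : public base::RecordHistogramChecker {
     public:
      explicit AllowListChecker(std::unordered_set<uint64_t> allowed_hashes)
          : allowed_hashes_(std::move(allowed_hashes)) {}

      // Called on arbitrary threads; must not mutate state (see the comment on
      // SetRecordChecker() above).
      bool ShouldRecord(uint64_t histogram_hash) const override {
        return allowed_hashes_.count(histogram_hash) != 0;
      }

     private:
      const std::unordered_set<uint64_t> allowed_hashes_;
    };

    void InstallChecker() {
      // Must run very early, before any threads have started.
      base::StatisticsRecorder::SetRecordChecker(
          std::make_unique<AllowListChecker>(
              std::unordered_set<uint64_t>{/* allowed histogram hashes */}));
    }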
diff --git a/base/metrics/user_metrics.cc b/base/metrics/user_metrics.cc
deleted file mode 100644
index 9fcc9e8..0000000
--- a/base/metrics/user_metrics.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/user_metrics.h"
-
-#include <stddef.h>
-
-#include <vector>
-
-#include "base/bind.h"
-#include "base/lazy_instance.h"
-#include "base/location.h"
-#include "base/macros.h"
-#include "base/threading/thread_checker.h"
-
-namespace base {
-namespace {
-
-LazyInstance<std::vector<ActionCallback>>::DestructorAtExit g_callbacks =
-    LAZY_INSTANCE_INITIALIZER;
-LazyInstance<scoped_refptr<SingleThreadTaskRunner>>::DestructorAtExit
-    g_task_runner = LAZY_INSTANCE_INITIALIZER;
-
-}  // namespace
-
-void RecordAction(const UserMetricsAction& action) {
-  RecordComputedAction(action.str_);
-}
-
-void RecordComputedAction(const std::string& action) {
-  if (!g_task_runner.Get()) {
-    DCHECK(g_callbacks.Get().empty());
-    return;
-  }
-
-  if (!g_task_runner.Get()->BelongsToCurrentThread()) {
-    g_task_runner.Get()->PostTask(FROM_HERE,
-                                  BindOnce(&RecordComputedAction, action));
-    return;
-  }
-
-  for (const ActionCallback& callback : g_callbacks.Get()) {
-    callback.Run(action);
-  }
-}
-
-void AddActionCallback(const ActionCallback& callback) {
-  // Only allow adding a callback if the task runner is set.
-  DCHECK(g_task_runner.Get());
-  DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
-  g_callbacks.Get().push_back(callback);
-}
-
-void RemoveActionCallback(const ActionCallback& callback) {
-  DCHECK(g_task_runner.Get());
-  DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
-  std::vector<ActionCallback>* callbacks = g_callbacks.Pointer();
-  for (size_t i = 0; i < callbacks->size(); ++i) {
-    if ((*callbacks)[i].Equals(callback)) {
-      callbacks->erase(callbacks->begin() + i);
-      return;
-    }
-  }
-}
-
-void SetRecordActionTaskRunner(
-    scoped_refptr<SingleThreadTaskRunner> task_runner) {
-  DCHECK(task_runner->BelongsToCurrentThread());
-  DCHECK(!g_task_runner.Get() || g_task_runner.Get()->BelongsToCurrentThread());
-  g_task_runner.Get() = task_runner;
-}
-
-}  // namespace base
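
The removed functions above were typically wired up once on the embedder's
main thread, roughly as follows. This is a sketch: LogAction and the use of
the current thread's task runner are illustrative.

    #include <string>

    #include "base/bind.h"
    #include "base/logging.h"
    #include "base/metrics/user_metrics.h"
    #include "base/threading/thread_task_runner_handle.h"

    void LogAction(const std::string& action) {
      // Runs on the task runner registered below.
      VLOG(1) << "User action: " << action;
    }

    void WireUpUserMetrics() {
      // Must happen before RecordAction() or AddActionCallback() is called.
      base::SetRecordActionTaskRunner(base::ThreadTaskRunnerHandle::Get());
      base::AddActionCallback(base::BindRepeating(&LogAction));
    }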
diff --git a/base/metrics/user_metrics.h b/base/metrics/user_metrics.h
deleted file mode 100644
index 87fbd9c..0000000
--- a/base/metrics/user_metrics.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_USER_METRICS_H_
-#define BASE_METRICS_USER_METRICS_H_
-
-#include <string>
-
-#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/metrics/user_metrics_action.h"
-#include "base/single_thread_task_runner.h"
-
-namespace base {
-
-// This module provides some helper functions for logging actions tracked by
-// the user metrics system.
-
-// For best practices on deciding when to emit a user action, see
-// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/actions/README.md
-
-// Record that the user performed an action.
-// This function must be called after the task runner has been set with
-// SetRecordActionTaskRunner().
-//
-// "Action" here means a user-generated event:
-//   good: "Reload", "CloseTab", and "IMEInvoked"
-//   not good: "SSLDialogShown", "PageLoaded", "DiskFull"
-// We use this to gather anonymized information about how users are
-// interacting with the browser.
-// WARNING: In calls to this function, UserMetricsAction must be given a
-// string literal parameter, not a variable, e.g.:
-//   RecordAction(UserMetricsAction("my action name"));
-// This ensures that our processing scripts can associate this action's hash
-// with its metric name. Therefore, it will be possible to retrieve the metric
-// name from the hash later on.
-//
-// Once a new recorded action is added, run
-//   tools/metrics/actions/extract_actions.py
-// to add the metric to actions.xml, then update the <owner>s and <description>
-// sections. Make sure to include the actions.xml file when you upload your code
-// for review!
-//
-// For more complicated situations (like when there are many different
-// possible actions), see RecordComputedAction().
-BASE_EXPORT void RecordAction(const UserMetricsAction& action);
-
-// This function has identical input and behavior to RecordAction(), but is
-// not automatically found by the action-processing scripts.  It can be used
-// when it's a pain to enumerate all possible actions, but if you use this
-// you need to also update the rules for extracting known actions in
-// tools/metrics/actions/extract_actions.py.
-// This function must be called after the task runner has been set with
-// SetRecordActionTaskRunner().
-BASE_EXPORT void RecordComputedAction(const std::string& action);
-
-// Called with the action string.
-typedef Callback<void(const std::string&)> ActionCallback;
-
-// Add/remove action callbacks (see above).
-// These functions must be called after the task runner has been set with
-// SetRecordActionTaskRunner().
-BASE_EXPORT void AddActionCallback(const ActionCallback& callback);
-BASE_EXPORT void RemoveActionCallback(const ActionCallback& callback);
-
-// Set the task runner on which to record actions.
-BASE_EXPORT void SetRecordActionTaskRunner(
-    scoped_refptr<SingleThreadTaskRunner> task_runner);
-
-}  // namespace base
-
-#endif  // BASE_METRICS_USER_METRICS_H_
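
The contract described above boils down to the following two usage shapes (a
sketch; the action names are illustrative):

    #include <string>

    #include "base/metrics/user_metrics.h"
    #include "base/metrics/user_metrics_action.h"

    void OnReloadClicked() {
      // The string literal inside UserMetricsAction() is what the extraction
      // scripts scan for; never pass a variable here.
      base::RecordAction(base::UserMetricsAction("ExampleMenu_Reload"));
    }

    void OnMenuItemActivated(int item_index) {
      // For names built at runtime, use RecordComputedAction() and update
      // extract_actions.py accordingly.
      base::RecordComputedAction("ExampleMenu_Item" +
                                 std::to_string(item_index));
    }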
diff --git a/base/metrics/user_metrics_action.h b/base/metrics/user_metrics_action.h
deleted file mode 100644
index 454ed83..0000000
--- a/base/metrics/user_metrics_action.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_USER_METRICS_ACTION_H_
-#define BASE_METRICS_USER_METRICS_ACTION_H_
-
-namespace base {
-
-// UserMetricsAction exists purely to standardize on the parameters passed to
-// UserMetrics. That way, our toolset can reliably scan the source code for
-// constructors and extract the associated string constants.
-// WARNING: When using UserMetricsAction you should use a string literal
-// parameter e.g.
-//   RecordAction(UserMetricsAction("my action name"));
-// This ensures that our processing scripts can associate this action's hash
-// with its metric name. Therefore, it will be possible to retrieve the metric
-// name from the hash later on.
-// Please see tools/metrics/actions/extract_actions.py for details.
-struct UserMetricsAction {
-  const char* str_;
-  explicit constexpr UserMetricsAction(const char* str) noexcept : str_(str) {}
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_USER_METRICS_ACTION_H_
diff --git a/base/process/kill_posix.cc b/base/process/kill_posix.cc
index 5159c19..7d75095 100644
--- a/base/process/kill_posix.cc
+++ b/base/process/kill_posix.cc
@@ -10,7 +10,6 @@
 #include <sys/wait.h>
 #include <unistd.h>
 
-#include "base/debug/activity_tracker.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/macros.h"
diff --git a/base/process/process.h b/base/process/process.h
index 479e24d..2826be3 100644
--- a/base/process/process.h
+++ b/base/process/process.h
@@ -20,16 +20,11 @@
 #endif
 
 #if defined(OS_MACOSX)
-#include "base/feature_list.h"
 #include "base/process/port_provider_mac.h"
 #endif
 
 namespace base {
 
-#if defined(OS_MACOSX)
-extern const Feature kMacAllowBackgroundingProcesses;
-#endif
-
 // Provides a move-only encapsulation of a process.
 //
 // This object is not tied to the lifetime of the underlying process: the
diff --git a/base/process/process_mac.cc b/base/process/process_mac.cc
index 70bc4c2..cd47c62 100644
--- a/base/process/process_mac.cc
+++ b/base/process/process_mac.cc
@@ -6,17 +6,12 @@
 
 #include <mach/mach.h>
 
-#include "base/feature_list.h"
 #include "base/mac/mach_logging.h"
 
 namespace base {
 
-// Enables backgrounding hidden renderers on Mac.
-const Feature kMacAllowBackgroundingProcesses{"MacAllowBackgroundingProcesses",
-                                              FEATURE_DISABLED_BY_DEFAULT};
-
 bool Process::CanBackgroundProcesses() {
-  return FeatureList::IsEnabled(kMacAllowBackgroundingProcesses);
+  return false;
 }
 
 bool Process::IsProcessBackgrounded(PortProvider* port_provider) const {
diff --git a/base/process/process_posix.cc b/base/process/process_posix.cc
index 51b57e1..6b758a2 100644
--- a/base/process/process_posix.cc
+++ b/base/process/process_posix.cc
@@ -10,7 +10,6 @@
 #include <sys/resource.h>
 #include <sys/wait.h>
 
-#include "base/debug/activity_tracker.h"
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
 #include "base/posix/eintr_wrapper.h"
@@ -339,9 +338,6 @@
   if (!timeout.is_zero())
     internal::AssertBaseSyncPrimitivesAllowed();
 
-  // Record the event that this thread is blocking upon (for hang diagnosis).
-  base::debug::ScopedProcessWaitActivity process_activity(this);
-
   int local_exit_code;
   bool exited = WaitForExitWithTimeoutImpl(Handle(), &local_exit_code, timeout);
   if (exited) {
diff --git a/base/synchronization/lock_impl_posix.cc b/base/synchronization/lock_impl_posix.cc
index 1cfa88a..3812fe2 100644
--- a/base/synchronization/lock_impl_posix.cc
+++ b/base/synchronization/lock_impl_posix.cc
@@ -6,7 +6,6 @@
 
 #include <string>
 
-#include "base/debug/activity_tracker.h"
 #include "base/logging.h"
 #include "base/posix/safe_strerror.h"
 #include "base/strings/stringprintf.h"
@@ -86,18 +85,6 @@
 }
 
 void LockImpl::Lock() {
-  // The ScopedLockAcquireActivity below is relatively expensive and so its
-  // actions can become significant due to the very large number of locks
-  // that tend to be used throughout the build. To avoid this cost in the
-  // vast majority of the calls, simply "try" the lock first and only do the
-  // (tracked) blocking call if that fails. Since "try" itself is a system
-  // call, and thus also somewhat expensive, don't bother with it unless
-  // tracking is actually enabled.
-  if (base::debug::GlobalActivityTracker::IsEnabled())
-    if (Try())
-      return;
-
-  base::debug::ScopedLockAcquireActivity lock_activity(this);
   int rv = pthread_mutex_lock(&native_handle_);
   DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
 }
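
The comment deleted from LockImpl::Lock() describes a general try-then-block
pattern: attempt the cheap non-blocking acquisition first, and pay for the
instrumented blocking path only on contention. A freestanding illustration
with std::mutex (not the base::internal::LockImpl API itself):

    #include <mutex>

    void LockWithCheapFastPath(std::mutex& mu, bool tracking_enabled) {
      // When the expensive tracking machinery is enabled, try_lock() skips it
      // entirely in the common uncontended case.
      if (tracking_enabled && mu.try_lock())
        return;

      // Contended (or tracking disabled): take the normal blocking path, which
      // is where the scoped "lock acquire" activity used to be recorded.
      mu.lock();
    }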
diff --git a/base/synchronization/waitable_event_mac.cc b/base/synchronization/waitable_event_mac.cc
index 56e6cb3..7979553 100644
--- a/base/synchronization/waitable_event_mac.cc
+++ b/base/synchronization/waitable_event_mac.cc
@@ -8,7 +8,7 @@
 #include <mach/mach.h>
 #include <sys/event.h>
 
-#include "base/debug/activity_tracker.h"
+#include "base/callback.h"
 #include "base/files/scoped_file.h"
 #include "base/mac/dispatch_source_mach.h"
 #include "base/mac/mac_util.h"
@@ -113,8 +113,6 @@
 bool WaitableEvent::TimedWaitUntil(const TimeTicks& end_time) {
   internal::AssertBaseSyncPrimitivesAllowed();
   ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
-  // Record the event that this thread is blocking upon (for hang diagnosis).
-  debug::ScopedEventWaitActivity event_activity(this);
 
   TimeDelta wait_time = end_time - TimeTicks::Now();
   if (wait_time < TimeDelta()) {
@@ -169,8 +167,6 @@
   internal::AssertBaseSyncPrimitivesAllowed();
   DCHECK(count) << "Cannot wait on no events";
   ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
-  // Record an event (the first) that this thread is blocking upon.
-  debug::ScopedEventWaitActivity event_activity(raw_waitables[0]);
 
   // On macOS 10.11+, using Mach port sets may cause system instability, per
   // https://crbug.com/756102. On macOS 10.12+, a kqueue can be used
diff --git a/base/synchronization/waitable_event_posix.cc b/base/synchronization/waitable_event_posix.cc
index 9799e7d..34d54c7 100644
--- a/base/synchronization/waitable_event_posix.cc
+++ b/base/synchronization/waitable_event_posix.cc
@@ -8,7 +8,6 @@
 #include <limits>
 #include <vector>
 
-#include "base/debug/activity_tracker.h"
 #include "base/logging.h"
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
@@ -164,8 +163,6 @@
 bool WaitableEvent::TimedWaitUntil(const TimeTicks& end_time) {
   internal::AssertBaseSyncPrimitivesAllowed();
   ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
-  // Record the event that this thread is blocking upon (for hang diagnosis).
-  base::debug::ScopedEventWaitActivity event_activity(this);
 
   const bool finite_time = !end_time.is_max();
 
@@ -240,8 +237,6 @@
   internal::AssertBaseSyncPrimitivesAllowed();
   DCHECK(count) << "Cannot wait on no events";
   ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
-  // Record an event (the first) that this thread is blocking upon.
-  base::debug::ScopedEventWaitActivity event_activity(raw_waitables[0]);
 
   // We need to acquire the locks in a globally consistent order. Thus we sort
   // the array of waitables by address. We actually sort pairs so that we can
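
The context above ends mid-comment, but the technique it refers to, acquiring
several locks in a globally consistent (address) order so that overlapping
waits cannot deadlock, looks like this in isolation (std::mutex stands in for
the per-waitable locks):

    #include <algorithm>
    #include <functional>
    #include <mutex>
    #include <vector>

    // Locks every mutex in |locks| in increasing address order. Two threads
    // locking overlapping sets always agree on the order, so neither can hold
    // one lock while waiting for the other's.
    void LockAllInAddressOrder(std::vector<std::mutex*> locks) {
      std::sort(locks.begin(), locks.end(), std::less<std::mutex*>());
      for (std::mutex* lock : locks)
        lock->lock();
    }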
diff --git a/base/task_scheduler/scheduler_worker_pool_impl.cc b/base/task_scheduler/scheduler_worker_pool_impl.cc
index 0e0c107..9e7e892 100644
--- a/base/task_scheduler/scheduler_worker_pool_impl.cc
+++ b/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -16,7 +16,6 @@
 #include "base/compiler_specific.h"
 #include "base/location.h"
 #include "base/memory/ptr_util.h"
-#include "base/metrics/histogram.h"
 #include "base/sequence_token.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
@@ -41,13 +40,6 @@
 
 namespace {
 
-constexpr char kPoolNameSuffix[] = "Pool";
-constexpr char kDetachDurationHistogramPrefix[] =
-    "TaskScheduler.DetachDuration.";
-constexpr char kNumTasksBeforeDetachHistogramPrefix[] =
-    "TaskScheduler.NumTasksBeforeDetach.";
-constexpr char kNumTasksBetweenWaitsHistogramPrefix[] =
-    "TaskScheduler.NumTasksBetweenWaits.";
 constexpr size_t kMaxNumberOfWorkers = 256;
 
 // Only used in DCHECKs.
@@ -168,38 +160,6 @@
       priority_hint_(priority_hint),
       lock_(shared_priority_queue_.container_lock()),
       idle_workers_stack_cv_for_testing_(lock_.CreateConditionVariable()),
-      // Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
-      detach_duration_histogram_(Histogram::FactoryTimeGet(
-          JoinString({kDetachDurationHistogramPrefix, histogram_label,
-                      kPoolNameSuffix},
-                     ""),
-          TimeDelta::FromMilliseconds(1),
-          TimeDelta::FromHours(1),
-          50,
-          HistogramBase::kUmaTargetedHistogramFlag)),
-      // Mimics the UMA_HISTOGRAM_COUNTS_1000 macro. When a worker runs more
-      // than 1000 tasks before detaching, there is no need to know the exact
-      // number of tasks that ran.
-      num_tasks_before_detach_histogram_(Histogram::FactoryGet(
-          JoinString({kNumTasksBeforeDetachHistogramPrefix, histogram_label,
-                      kPoolNameSuffix},
-                     ""),
-          1,
-          1000,
-          50,
-          HistogramBase::kUmaTargetedHistogramFlag)),
-      // Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A SchedulerWorker is
-      // expected to run between zero and a few tens of tasks between waits.
-      // When it runs more than 100 tasks, there is no need to know the exact
-      // number of tasks that ran.
-      num_tasks_between_waits_histogram_(Histogram::FactoryGet(
-          JoinString({kNumTasksBetweenWaitsHistogramPrefix, histogram_label,
-                      kPoolNameSuffix},
-                     ""),
-          1,
-          100,
-          50,
-          HistogramBase::kUmaTargetedHistogramFlag)),
       tracked_ref_factory_(this) {
   DCHECK(!histogram_label.empty());
   DCHECK(!pool_label_.empty());
@@ -271,12 +231,6 @@
   WakeUpOneWorker();
 }
 
-void SchedulerWorkerPoolImpl::GetHistograms(
-    std::vector<const HistogramBase*>* histograms) const {
-  histograms->push_back(detach_duration_histogram_);
-  histograms->push_back(num_tasks_between_waits_histogram_);
-}
-
 int SchedulerWorkerPoolImpl::GetMaxConcurrentNonBlockedTasksDeprecated() const {
 #if DCHECK_IS_ON()
   AutoSchedulerLock auto_lock(lock_);
@@ -521,7 +475,6 @@
   DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
 
   outer_->lock_.AssertAcquired();
-  outer_->num_tasks_before_detach_histogram_->Add(num_tasks_since_last_detach_);
   outer_->cleanup_timestamps_.push(TimeTicks::Now());
   worker->Cleanup();
   outer_->RemoveFromIdleWorkersStackLockRequired(worker);
@@ -546,7 +499,6 @@
   // returns nullptr, the SchedulerWorker will perform a wait on its
   // WaitableEvent, so we record how many tasks were ran since the last wait
   // here.
-  outer_->num_tasks_between_waits_histogram_->Add(num_tasks_since_last_wait_);
   num_tasks_since_last_wait_ = 0;
   outer_->AddToIdleWorkersStackLockRequired(worker);
   SetIsOnIdleWorkersStackLockRequired(worker);
@@ -843,8 +795,6 @@
   DCHECK_LE(workers_.size(), worker_capacity_);
 
   if (!cleanup_timestamps_.empty()) {
-    detach_duration_histogram_->AddTime(TimeTicks::Now() -
-                                        cleanup_timestamps_.top());
     cleanup_timestamps_.pop();
   }
   return worker.get();
diff --git a/base/task_scheduler/scheduler_worker_pool_impl.h b/base/task_scheduler/scheduler_worker_pool_impl.h
index 997fcc9..d331d3c 100644
--- a/base/task_scheduler/scheduler_worker_pool_impl.h
+++ b/base/task_scheduler/scheduler_worker_pool_impl.h
@@ -34,7 +34,6 @@
 
 namespace base {
 
-class HistogramBase;
 class SchedulerWorkerObserver;
 class SchedulerWorkerPoolParams;
 
@@ -95,16 +94,6 @@
   // SchedulerWorkerPool:
   void JoinForTesting() override;
 
-  const HistogramBase* num_tasks_before_detach_histogram() const {
-    return num_tasks_before_detach_histogram_;
-  }
-
-  const HistogramBase* num_tasks_between_waits_histogram() const {
-    return num_tasks_between_waits_histogram_;
-  }
-
-  void GetHistograms(std::vector<const HistogramBase*>* histograms) const;
-
   // Returns the maximum number of non-blocked tasks that can run concurrently
   // in this pool.
   //
@@ -306,18 +295,6 @@
   AtomicFlag join_for_testing_started_;
 #endif
 
-  // TaskScheduler.DetachDuration.[worker pool name] histogram. Intentionally
-  // leaked.
-  HistogramBase* const detach_duration_histogram_;
-
-  // TaskScheduler.NumTasksBeforeDetach.[worker pool name] histogram.
-  // Intentionally leaked.
-  HistogramBase* const num_tasks_before_detach_histogram_;
-
-  // TaskScheduler.NumTasksBetweenWaits.[worker pool name] histogram.
-  // Intentionally leaked.
-  HistogramBase* const num_tasks_between_waits_histogram_;
-
   scoped_refptr<TaskRunner> service_thread_task_runner_;
 
   // Optional observer notified when a worker enters and exits its main
diff --git a/base/task_scheduler/service_thread.cc b/base/task_scheduler/service_thread.cc
index 40f217f..ce8bf4d 100644
--- a/base/task_scheduler/service_thread.cc
+++ b/base/task_scheduler/service_thread.cc
@@ -13,17 +13,9 @@
 namespace base {
 namespace internal {
 
-ServiceThread::ServiceThread(const TaskTracker* task_tracker)
-    : Thread("TaskSchedulerServiceThread"), task_tracker_(task_tracker) {}
+ServiceThread::ServiceThread() : Thread("TaskSchedulerServiceThread") {}
 
-void ServiceThread::Init() {
-  if (task_tracker_) {
-    heartbeat_latency_timer_.Start(
-        FROM_HERE, TimeDelta::FromSeconds(5),
-        BindRepeating(&ServiceThread::PerformHeartbeatLatencyReport,
-                      Unretained(this)));
-  }
-}
+void ServiceThread::Init() {}
 
 NOINLINE void ServiceThread::Run(RunLoop* run_loop) {
   const int line_number = __LINE__;
@@ -31,23 +23,5 @@
   base::debug::Alias(&line_number);
 }
 
-void ServiceThread::PerformHeartbeatLatencyReport() const {
-  static constexpr TaskTraits kReportedTraits[] = {
-      {TaskPriority::BACKGROUND},    {TaskPriority::BACKGROUND, MayBlock()},
-      {TaskPriority::USER_VISIBLE},  {TaskPriority::USER_VISIBLE, MayBlock()},
-      {TaskPriority::USER_BLOCKING}, {TaskPriority::USER_BLOCKING, MayBlock()}};
-
-  for (auto& traits : kReportedTraits) {
-    // Post through the static API to time the full stack. Use a new Now() for
-    // every set of traits in case PostTaskWithTraits() itself is slow.
-    base::PostTaskWithTraits(
-        FROM_HERE, traits,
-        BindOnce(&TaskTracker::RecordLatencyHistogram,
-                 Unretained(task_tracker_),
-                 TaskTracker::LatencyHistogramType::HEARTBEAT_LATENCY, traits,
-                 TimeTicks::Now()));
-  }
-}
-
 }  // namespace internal
 }  // namespace base
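
For reference, the heartbeat mechanism deleted above followed the usual
base::RepeatingTimer shape; a sketch of the pattern (not the exact removed
code, and HeartbeatReporter is illustrative):

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/time/time.h"
    #include "base/timer/timer.h"

    class HeartbeatReporter {
     public:
      // Starts a recurring task on the current thread, the same shape as the
      // heartbeat_latency_timer_ removed above. Unretained(this) requires that
      // this object (and thus the timer it owns) outlives the callbacks.
      void Start() {
        timer_.Start(FROM_HERE, base::TimeDelta::FromSeconds(5),
                     base::BindRepeating(&HeartbeatReporter::Report,
                                         base::Unretained(this)));
      }

     private:
      void Report() {
        // Post sample work and record its latency (elided).
      }

      base::RepeatingTimer timer_;
    };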
diff --git a/base/task_scheduler/service_thread.h b/base/task_scheduler/service_thread.h
index f9b23fa..14ccd76 100644
--- a/base/task_scheduler/service_thread.h
+++ b/base/task_scheduler/service_thread.h
@@ -27,24 +27,13 @@
   // |task_tracker| if non-null. In that case, this ServiceThread will assume a
   // registered TaskScheduler instance and that |task_tracker| will outlive this
   // ServiceThread.
-  explicit ServiceThread(const TaskTracker* task_tracker);
+  ServiceThread();
 
  private:
   // Thread:
   void Init() override;
   void Run(RunLoop* run_loop) override;
 
-  // Kicks off async tasks which will record a histogram on the latency of
-  // various traits.
-  void PerformHeartbeatLatencyReport() const;
-
-  const TaskTracker* const task_tracker_;
-
-  // Fires a recurring heartbeat task to record latency histograms which are
-  // independent from any execution sequence. This is done on the service thread
-  // to avoid all external dependencies (even main thread).
-  base::RepeatingTimer heartbeat_latency_timer_;
-
   DISALLOW_COPY_AND_ASSIGN(ServiceThread);
 };
 
diff --git a/base/task_scheduler/task_scheduler.h b/base/task_scheduler/task_scheduler.h
index 8881028..3135f8c 100644
--- a/base/task_scheduler/task_scheduler.h
+++ b/base/task_scheduler/task_scheduler.h
@@ -136,9 +136,6 @@
       SingleThreadTaskRunnerThreadMode thread_mode) = 0;
 #endif  // defined(OS_WIN)
 
-  // Returns a vector of all histograms available in this task scheduler.
-  virtual std::vector<const HistogramBase*> GetHistograms() const = 0;
-
   // Synchronously shuts down the scheduler. Once this is called, only tasks
   // posted with the BLOCK_SHUTDOWN behavior will be run. When this returns:
   // - All SKIP_ON_SHUTDOWN tasks that were already running have completed their
diff --git a/base/task_scheduler/task_scheduler_impl.cc b/base/task_scheduler/task_scheduler_impl.cc
index a5ab06c..88250c2 100644
--- a/base/task_scheduler/task_scheduler_impl.cc
+++ b/base/task_scheduler/task_scheduler_impl.cc
@@ -9,7 +9,6 @@
 
 #include "base/compiler_specific.h"
 #include "base/message_loop/message_loop.h"
-#include "base/metrics/field_trial_params.h"
 #include "base/strings/string_util.h"
 #include "base/task_scheduler/delayed_task_manager.h"
 #include "base/task_scheduler/environment_config.h"
@@ -32,7 +31,7 @@
     StringPiece histogram_label,
     std::unique_ptr<TaskTrackerImpl> task_tracker)
     : task_tracker_(std::move(task_tracker)),
-      service_thread_(std::make_unique<ServiceThread>(task_tracker_.get())),
+      service_thread_(std::make_unique<ServiceThread>()),
       single_thread_task_runner_manager_(task_tracker_->GetTrackedRef(),
                                          &delayed_task_manager_) {
   DCHECK(!histogram_label.empty());
@@ -64,13 +63,6 @@
 void TaskSchedulerImpl::Start(
     const TaskScheduler::InitParams& init_params,
     SchedulerWorkerObserver* scheduler_worker_observer) {
-  // This is set in Start() and not in the constructor because variation params
-  // are usually not ready when TaskSchedulerImpl is instantiated in a process.
-  if (base::GetFieldTrialParamValue("BrowserScheduler",
-                                    "AllTasksUserBlocking") == "true") {
-    all_tasks_user_blocking_.Set();
-  }
-
   // Start the service thread. On platforms that support it (POSIX except NaCL
   // SFI), the service thread runs a MessageLoopForIO which is used to support
   // FileDescriptorWatcher in the scope in which tasks run.
@@ -174,14 +166,6 @@
 }
 #endif  // defined(OS_WIN)
 
-std::vector<const HistogramBase*> TaskSchedulerImpl::GetHistograms() const {
-  std::vector<const HistogramBase*> histograms;
-  for (const auto& worker_pool : worker_pools_)
-    worker_pool->GetHistograms(&histograms);
-
-  return histograms;
-}
-
 int TaskSchedulerImpl::GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
     const TaskTraits& traits) const {
   return GetWorkerPoolForTraits(traits)
@@ -225,9 +209,7 @@
 
 TaskTraits TaskSchedulerImpl::SetUserBlockingPriorityIfNeeded(
     const TaskTraits& traits) const {
-  return all_tasks_user_blocking_.IsSet()
-             ? TaskTraits::Override(traits, {TaskPriority::USER_BLOCKING})
-             : traits;
+  return traits;
 }
 
 }  // namespace internal
diff --git a/base/task_scheduler/task_scheduler_impl.h b/base/task_scheduler/task_scheduler_impl.h
index 4ad7fc2..f409dc5 100644
--- a/base/task_scheduler/task_scheduler_impl.h
+++ b/base/task_scheduler/task_scheduler_impl.h
@@ -79,7 +79,6 @@
       const TaskTraits& traits,
       SingleThreadTaskRunnerThreadMode thread_mode) override;
 #endif  // defined(OS_WIN)
-  std::vector<const HistogramBase*> GetHistograms() const override;
   int GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
       const TaskTraits& traits) const override;
   void Shutdown() override;
@@ -101,14 +100,6 @@
   DelayedTaskManager delayed_task_manager_;
   SchedulerSingleThreadTaskRunnerManager single_thread_task_runner_manager_;
 
-  // Indicates that all tasks are handled as if they had been posted with
-  // TaskPriority::USER_BLOCKING. Since this is set in Start(), it doesn't apply
-  // to tasks posted before Start() or to tasks posted to TaskRunners created
-  // before Start().
-  //
-  // TODO(fdoray): Remove after experiment. https://crbug.com/757022
-  AtomicFlag all_tasks_user_blocking_;
-
   // There are 4 SchedulerWorkerPoolImpl in this array to match the 4
   // SchedulerWorkerPoolParams in TaskScheduler::InitParams.
   std::unique_ptr<SchedulerWorkerPoolImpl> worker_pools_[4];
diff --git a/base/task_scheduler/task_tracker.cc b/base/task_scheduler/task_tracker.cc
index 4fd2356..33424bb 100644
--- a/base/task_scheduler/task_tracker.cc
+++ b/base/task_scheduler/task_tracker.cc
@@ -13,7 +13,6 @@
 #include "base/command_line.h"
 #include "base/json/json_writer.h"
 #include "base/memory/ptr_util.h"
-#include "base/metrics/histogram_macros.h"
 #include "base/sequence_token.h"
 #include "base/strings/string_util.h"
 #include "base/synchronization/condition_variable.h"
@@ -52,45 +51,6 @@
   DISALLOW_COPY_AND_ASSIGN(TaskTracingInfo);
 };
 
-// These names convey that a Task is posted to/run by the task scheduler
-// without revealing its implementation details.
-constexpr char kQueueFunctionName[] = "TaskScheduler PostTask";
-constexpr char kRunFunctionName[] = "TaskScheduler RunTask";
-
-// Constructs a histogram to track latency, which is logged to
-// "TaskScheduler.{histogram_name}.{histogram_label}.{task_type_suffix}".
-HistogramBase* GetLatencyHistogram(StringPiece histogram_name,
-                                   StringPiece histogram_label,
-                                   StringPiece task_type_suffix) {
-  DCHECK(!histogram_name.empty());
-  DCHECK(!histogram_label.empty());
-  DCHECK(!task_type_suffix.empty());
-  // Mimics the UMA_HISTOGRAM_HIGH_RESOLUTION_CUSTOM_TIMES macro. The minimum
-  // and maximum were chosen so that the 1ms mark falls at around 70% of the
-  // bucket range, giving good resolution for tasks with latency below 1ms
-  // (most of them) and enough information to assess how bad the latency is
-  // for tasks that exceed this threshold.
-  const std::string histogram = JoinString(
-      {"TaskScheduler", histogram_name, histogram_label, task_type_suffix},
-      ".");
-  return Histogram::FactoryMicrosecondsTimeGet(
-      histogram, TimeDelta::FromMicroseconds(1),
-      TimeDelta::FromMilliseconds(20), 50,
-      HistogramBase::kUmaTargetedHistogramFlag);
-}
-
-// Upper bound for the
-// TaskScheduler.BlockShutdownTasksPostedDuringShutdown histogram.
-constexpr HistogramBase::Sample kMaxBlockShutdownTasksPostedDuringShutdown =
-    1000;
-
-void RecordNumBlockShutdownTasksPostedDuringShutdown(
-    HistogramBase::Sample value) {
-  UMA_HISTOGRAM_CUSTOM_COUNTS(
-      "TaskScheduler.BlockShutdownTasksPostedDuringShutdown", value, 1,
-      kMaxBlockShutdownTasksPostedDuringShutdown, 50);
-}
-
 // Returns the maximum number of TaskPriority::BACKGROUND sequences that can be
 // scheduled concurrently based on command line flags.
 int GetMaxNumScheduledBackgroundSequences() {
@@ -235,49 +195,7 @@
       shutdown_lock_(&flush_lock_),
       max_num_scheduled_background_sequences_(
           max_num_scheduled_background_sequences),
-      task_latency_histograms_{
-          {GetLatencyHistogram("TaskLatencyMicroseconds",
-                               histogram_label,
-                               "BackgroundTaskPriority"),
-           GetLatencyHistogram("TaskLatencyMicroseconds",
-                               histogram_label,
-                               "BackgroundTaskPriority_MayBlock")},
-          {GetLatencyHistogram("TaskLatencyMicroseconds",
-                               histogram_label,
-                               "UserVisibleTaskPriority"),
-           GetLatencyHistogram("TaskLatencyMicroseconds",
-                               histogram_label,
-                               "UserVisibleTaskPriority_MayBlock")},
-          {GetLatencyHistogram("TaskLatencyMicroseconds",
-                               histogram_label,
-                               "UserBlockingTaskPriority"),
-           GetLatencyHistogram("TaskLatencyMicroseconds",
-                               histogram_label,
-                               "UserBlockingTaskPriority_MayBlock")}},
-      heartbeat_latency_histograms_{
-          {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
-                               histogram_label,
-                               "BackgroundTaskPriority"),
-           GetLatencyHistogram("HeartbeatLatencyMicroseconds",
-                               histogram_label,
-                               "BackgroundTaskPriority_MayBlock")},
-          {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
-                               histogram_label,
-                               "UserVisibleTaskPriority"),
-           GetLatencyHistogram("HeartbeatLatencyMicroseconds",
-                               histogram_label,
-                               "UserVisibleTaskPriority_MayBlock")},
-          {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
-                               histogram_label,
-                               "UserBlockingTaskPriority"),
-           GetLatencyHistogram("HeartbeatLatencyMicroseconds",
-                               histogram_label,
-                               "UserBlockingTaskPriority_MayBlock")}},
       tracked_ref_factory_(this) {
-  // Confirm that all |task_latency_histograms_| have been initialized above.
-  DCHECK(*(&task_latency_histograms_[static_cast<int>(TaskPriority::HIGHEST) +
-                                     1][0] -
-           1));
 }
 
 TaskTracker::~TaskTracker() = default;
@@ -419,22 +337,9 @@
   state_->StartShutdown();
 }
 
-void TaskTracker::RecordLatencyHistogram(
-    LatencyHistogramType latency_histogram_type,
-    TaskTraits task_traits,
-    TimeTicks posted_time) const {
-  const TimeDelta task_latency = TimeTicks::Now() - posted_time;
-
-  DCHECK(latency_histogram_type == LatencyHistogramType::TASK_LATENCY ||
-         latency_histogram_type == LatencyHistogramType::HEARTBEAT_LATENCY);
-}
-
 void TaskTracker::RunOrSkipTask(Task task,
                                 Sequence* sequence,
                                 bool can_run_task) {
-  RecordLatencyHistogram(LatencyHistogramType::TASK_LATENCY, task.traits,
-                         task.sequenced_time);
-
   const bool previous_singleton_allowed =
       ThreadRestrictions::SetSingletonAllowed(
           task.traits.shutdown_behavior() !=
@@ -488,7 +393,6 @@
 
     // This method can only be called once.
     DCHECK(!shutdown_event_);
-    DCHECK(!num_block_shutdown_tasks_posted_during_shutdown_);
     DCHECK(!state_->HasShutdownStarted());
 
     shutdown_event_.reset(
@@ -523,20 +427,6 @@
     base::ThreadRestrictions::ScopedAllowWait allow_wait;
     shutdown_event_->Wait();
   }
-
-  {
-    AutoSchedulerLock auto_lock(shutdown_lock_);
-
-    // Record TaskScheduler.BlockShutdownTasksPostedDuringShutdown if fewer
-    // than |kMaxBlockShutdownTasksPostedDuringShutdown| BLOCK_SHUTDOWN tasks
-    // were posted during shutdown. Otherwise, the histogram has already been
-    // recorded in BeforePostTask().
-    if (num_block_shutdown_tasks_posted_during_shutdown_ <
-        kMaxBlockShutdownTasksPostedDuringShutdown) {
-      RecordNumBlockShutdownTasksPostedDuringShutdown(
-          num_block_shutdown_tasks_posted_during_shutdown_);
-    }
-  }
 }
 
 void TaskTracker::SetMaxNumScheduledBackgroundSequences(
@@ -626,18 +516,6 @@
         state_->DecrementNumTasksBlockingShutdown();
         return false;
       }
-
-      ++num_block_shutdown_tasks_posted_during_shutdown_;
-
-      if (num_block_shutdown_tasks_posted_during_shutdown_ ==
-          kMaxBlockShutdownTasksPostedDuringShutdown) {
-        // Record the TaskScheduler.BlockShutdownTasksPostedDuringShutdown
-        // histogram as soon as its upper bound is hit. That way, a value will
-        // be recorded even if an infinite number of BLOCK_SHUTDOWN tasks are
-        // posted, preventing shutdown from completing.
-        RecordNumBlockShutdownTasksPostedDuringShutdown(
-            num_block_shutdown_tasks_posted_during_shutdown_);
-      }
     }
 
     return true;
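
The deleted record-at-cap logic above is a small idiom worth noting: emit the
histogram the moment the counter reaches its upper bound, so a sample exists
even if the counted events never stop. A sketch under assumed names
(Example.EventsDuringShutdown is illustrative; UMA_HISTOGRAM_CUSTOM_COUNTS is
the standard macro):

    #include "base/metrics/histogram_macros.h"

    constexpr int kMaxTracked = 1000;

    void CountEventDuringShutdown(int* counter) {
      ++*counter;
      // Record exactly once, when the cap is hit; otherwise the final value is
      // recorded later, after shutdown completes.
      if (*counter == kMaxTracked) {
        UMA_HISTOGRAM_CUSTOM_COUNTS("Example.EventsDuringShutdown", *counter, 1,
                                    kMaxTracked, 50);
      }
    }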
diff --git a/base/task_scheduler/task_tracker.h b/base/task_scheduler/task_tracker.h
index 760a8f7..ae484ce 100644
--- a/base/task_scheduler/task_tracker.h
+++ b/base/task_scheduler/task_tracker.h
@@ -15,7 +15,6 @@
 #include "base/debug/task_annotator.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/metrics/histogram_base.h"
 #include "base/strings/string_piece.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/task_scheduler/can_schedule_sequence_observer.h"
@@ -28,7 +27,6 @@
 namespace base {
 
 class ConditionVariable;
-class HistogramBase;
 
 namespace internal {
 
@@ -160,27 +158,11 @@
   // Returns true if shutdown has completed (Shutdown() has returned).
   bool IsShutdownComplete() const;
 
-  enum class LatencyHistogramType {
-    // Records the latency of each individual task posted through TaskTracker.
-    TASK_LATENCY,
-    // Records the latency of heartbeat tasks, which are independent of the
-    // current workload. These avoid TASK_LATENCY's bias towards reporting
-    // that high-priority tasks are "slower" than regular tasks, since
-    // high-priority tasks tend to be correlated with heavy workloads.
-    HEARTBEAT_LATENCY,
-  };
-
   // Causes HasShutdownStarted() to return true. Unlike when Shutdown() returns,
   // IsShutdownComplete() won't return true after this returns. Shutdown()
   // cannot be called after this.
   void SetHasShutdownStartedForTesting();
 
-  // Records |Now() - posted_time| to the appropriate |latency_histogram_type|
-  // based on |task_traits|.
-  void RecordLatencyHistogram(LatencyHistogramType latency_histogram_type,
-                              TaskTraits task_traits,
-                              TimeTicks posted_time) const;
-
   TrackedRef<TaskTracker> GetTrackedRef() {
     return tracked_ref_factory_.GetTrackedRef();
   }
@@ -331,20 +313,6 @@
   // Number of currently scheduled background sequences.
   int num_scheduled_background_sequences_ = 0;
 
-  // TaskScheduler.TaskLatencyMicroseconds.* and
-  // TaskScheduler.HeartbeatLatencyMicroseconds.* histograms. The first index is
-  // a TaskPriority. The second index is 0 for non-blocking tasks, 1 for
-  // blocking tasks. Intentionally leaked.
-  // TODO(scheduler-dev): Consider using STATIC_HISTOGRAM_POINTER_GROUP for
-  // these.
-  static constexpr int kNumTaskPriorities =
-      static_cast<int>(TaskPriority::HIGHEST) + 1;
-  HistogramBase* const task_latency_histograms_[kNumTaskPriorities][2];
-  HistogramBase* const heartbeat_latency_histograms_[kNumTaskPriorities][2];
-
-  // Number of BLOCK_SHUTDOWN tasks posted during shutdown.
-  HistogramBase::Sample num_block_shutdown_tasks_posted_during_shutdown_ = 0;
-
   // Ensures all state (e.g. dangling cleaned up workers) is coalesced before
   // destroying the TaskTracker (e.g. in test environments).
   // Ref. https://crbug.com/827615.
diff --git a/base/test/histogram_tester.cc b/base/test/histogram_tester.cc
deleted file mode 100644
index 2a63b8c..0000000
--- a/base/test/histogram_tester.cc
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/test/histogram_tester.h"
-
-#include <stddef.h>
-
-#include "base/metrics/histogram.h"
-#include "base/metrics/histogram_samples.h"
-#include "base/metrics/metrics_hashes.h"
-#include "base/metrics/sample_map.h"
-#include "base/metrics/statistics_recorder.h"
-#include "base/strings/string_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-HistogramTester::HistogramTester() {
-  // Record any histogram data that exists when the object is created so it can
-  // be subtracted later.
-  for (const auto* const histogram : StatisticsRecorder::GetHistograms()) {
-    histograms_snapshot_[histogram->histogram_name()] =
-        histogram->SnapshotSamples();
-  }
-}
-
-HistogramTester::~HistogramTester() = default;
-
-void HistogramTester::ExpectUniqueSample(
-    const std::string& name,
-    HistogramBase::Sample sample,
-    HistogramBase::Count expected_count) const {
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  EXPECT_NE(nullptr, histogram) << "Histogram \"" << name
-                                << "\" does not exist.";
-
-  if (histogram) {
-    std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
-    CheckBucketCount(name, sample, expected_count, *samples);
-    CheckTotalCount(name, expected_count, *samples);
-  }
-}
-
-void HistogramTester::ExpectBucketCount(
-    const std::string& name,
-    HistogramBase::Sample sample,
-    HistogramBase::Count expected_count) const {
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  EXPECT_NE(nullptr, histogram) << "Histogram \"" << name
-                                << "\" does not exist.";
-
-  if (histogram) {
-    std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
-    CheckBucketCount(name, sample, expected_count, *samples);
-  }
-}
-
-void HistogramTester::ExpectTotalCount(const std::string& name,
-                                       HistogramBase::Count count) const {
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  if (histogram) {
-    std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
-    CheckTotalCount(name, count, *samples);
-  } else {
-    // No histogram means there were zero samples.
-    EXPECT_EQ(count, 0) << "Histogram \"" << name << "\" does not exist.";
-  }
-}
-
-void HistogramTester::ExpectTimeBucketCount(const std::string& name,
-                                            TimeDelta sample,
-                                            HistogramBase::Count count) const {
-  ExpectBucketCount(name, sample.InMilliseconds(), count);
-}
-
-std::vector<Bucket> HistogramTester::GetAllSamples(
-    const std::string& name) const {
-  std::vector<Bucket> samples;
-  std::unique_ptr<HistogramSamples> snapshot =
-      GetHistogramSamplesSinceCreation(name);
-  if (snapshot) {
-    for (auto it = snapshot->Iterator(); !it->Done(); it->Next()) {
-      HistogramBase::Sample sample;
-      HistogramBase::Count count;
-      it->Get(&sample, nullptr, &count);
-      samples.push_back(Bucket(sample, count));
-    }
-  }
-  return samples;
-}
-
-HistogramBase::Count HistogramTester::GetBucketCount(
-    const std::string& name,
-    HistogramBase::Sample sample) const {
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  EXPECT_NE(nullptr, histogram)
-      << "Histogram \"" << name << "\" does not exist.";
-  HistogramBase::Count count = 0;
-  if (histogram) {
-    std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
-    GetBucketCountForSamples(name, sample, *samples, &count);
-  }
-  return count;
-}
-
-void HistogramTester::GetBucketCountForSamples(
-    const std::string& name,
-    HistogramBase::Sample sample,
-    const HistogramSamples& samples,
-    HistogramBase::Count* count) const {
-  *count = samples.GetCount(sample);
-  auto histogram_data = histograms_snapshot_.find(name);
-  if (histogram_data != histograms_snapshot_.end())
-    *count -= histogram_data->second->GetCount(sample);
-}
-
-HistogramTester::CountsMap HistogramTester::GetTotalCountsForPrefix(
-    const std::string& prefix) const {
-  EXPECT_TRUE(prefix.find('.') != std::string::npos)
-      << "|prefix| ought to contain at least one period, to avoid matching too"
-      << " many histograms.";
-
-  CountsMap result;
-
-  // Find candidate matches by using the logic built into GetSnapshot().
-  for (const HistogramBase* histogram : StatisticsRecorder::GetHistograms()) {
-    if (!StartsWith(histogram->histogram_name(), prefix,
-                    CompareCase::SENSITIVE)) {
-      continue;
-    }
-    std::unique_ptr<HistogramSamples> new_samples =
-        GetHistogramSamplesSinceCreation(histogram->histogram_name());
-    // Omit unchanged histograms from the result.
-    if (new_samples->TotalCount()) {
-      result[histogram->histogram_name()] = new_samples->TotalCount();
-    }
-  }
-  return result;
-}
-
-std::unique_ptr<HistogramSamples>
-HistogramTester::GetHistogramSamplesSinceCreation(
-    const std::string& histogram_name) const {
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(histogram_name);
-  // Whether the histogram exists may depend not on the current test calling
-  // this method but on which tests ran before it and whether they generated
-  // the histogram (see http://crbug.com/473689). To provide a response that
-  // is independent of previously run tests, this method creates empty
-  // samples when the histogram is absent, rather than returning null.
-  if (!histogram) {
-    return std::unique_ptr<HistogramSamples>(
-        new SampleMap(HashMetricName(histogram_name)));
-  }
-  std::unique_ptr<HistogramSamples> named_samples =
-      histogram->SnapshotSamples();
-  auto original_samples_it = histograms_snapshot_.find(histogram_name);
-  if (original_samples_it != histograms_snapshot_.end())
-    named_samples->Subtract(*original_samples_it->second.get());
-  return named_samples;
-}
-
-void HistogramTester::CheckBucketCount(const std::string& name,
-                                       HistogramBase::Sample sample,
-                                       HistogramBase::Count expected_count,
-                                       const HistogramSamples& samples) const {
-  HistogramBase::Count actual_count = 0;
-  GetBucketCountForSamples(name, sample, samples, &actual_count);
-
-  EXPECT_EQ(expected_count, actual_count)
-      << "Histogram \"" << name
-      << "\" does not have the right number of samples (" << expected_count
-      << ") in the expected bucket (" << sample << "). It has (" << actual_count
-      << ").";
-}
-
-void HistogramTester::CheckTotalCount(const std::string& name,
-                                      HistogramBase::Count expected_count,
-                                      const HistogramSamples& samples) const {
-  HistogramBase::Count actual_count = samples.TotalCount();
-  auto histogram_data = histograms_snapshot_.find(name);
-  if (histogram_data != histograms_snapshot_.end())
-    actual_count -= histogram_data->second->TotalCount();
-
-  EXPECT_EQ(expected_count, actual_count)
-      << "Histogram \"" << name
-      << "\" does not have the right total number of samples ("
-      << expected_count << "). It has (" << actual_count << ").";
-}
-
-bool Bucket::operator==(const Bucket& other) const {
-  return min == other.min && count == other.count;
-}
-
-void PrintTo(const Bucket& bucket, std::ostream* os) {
-  *os << "Bucket " << bucket.min << ": " << bucket.count;
-}
-
-}  // namespace base
diff --git a/base/test/histogram_tester.h b/base/test/histogram_tester.h
deleted file mode 100644
index 8019931..0000000
--- a/base/test/histogram_tester.h
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TEST_HISTOGRAM_TESTER_H_
-#define BASE_TEST_HISTOGRAM_TESTER_H_
-
-#include <map>
-#include <memory>
-#include <ostream>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "base/macros.h"
-#include "base/metrics/histogram.h"
-#include "base/metrics/histogram_base.h"
-#include "base/time/time.h"
-
-namespace base {
-
-struct Bucket;
-class HistogramSamples;
-
-// HistogramTester provides a simple interface for examining histograms, UMA
-// or otherwise. Tests can use this interface to verify that histogram data is
-// getting logged as intended.
-//
-// Note: When using this class from a browser test, one might have to call
-// SubprocessMetricsProvider::MergeHistogramDeltasForTesting() to sync the
-// histogram data between the renderer and browser processes. If it is in a
-// content browser test, then content::FetchHistogramsFromChildProcesses()
-// should be used to achieve that.
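-//
-// Typical usage, assuming hypothetical code under test that logs exactly once
-// to a histogram named "MyMetric":
-//   base::HistogramTester histogram_tester;
-//   CodeUnderTestThatLogsToMyMetric();
-//   histogram_tester.ExpectTotalCount("MyMetric", 1);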
-class HistogramTester {
- public:
-  using CountsMap = std::map<std::string, HistogramBase::Count>;
-
-  // Takes a snapshot of all current histogram counts.
-  HistogramTester();
-  ~HistogramTester();
-
-  // We know the exact number of samples in a bucket, and that no other bucket
-  // should have samples. This measures the diff from the snapshot taken when
-  // this object was constructed.
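-  // For example, assuming a hypothetical histogram "MyMetric" that was logged
-  // exactly once with the value 5:
-  //   histogram_tester.ExpectUniqueSample("MyMetric", 5, 1);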
-  void ExpectUniqueSample(const std::string& name,
-                          HistogramBase::Sample sample,
-                          HistogramBase::Count expected_count) const;
-  template <typename T>
-  void ExpectUniqueSample(const std::string& name,
-                          T sample,
-                          HistogramBase::Count expected_count) const {
-    ExpectUniqueSample(name, static_cast<HistogramBase::Sample>(sample),
-                       expected_count);
-  }
-
-  // We know the exact number of samples in a bucket, but other buckets may
-  // have samples as well. This measures the diff from the snapshot taken when
-  // this object was constructed.
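-  // For example, assuming two samples with the value 5 were logged to a
-  // hypothetical histogram "MyMetric" (other buckets are not checked):
-  //   histogram_tester.ExpectBucketCount("MyMetric", 5, 2);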
-  void ExpectBucketCount(const std::string& name,
-                         HistogramBase::Sample sample,
-                         HistogramBase::Count expected_count) const;
-  template <typename T>
-  void ExpectBucketCount(const std::string& name,
-                         T sample,
-                         HistogramBase::Count expected_count) const {
-    ExpectBucketCount(name, static_cast<HistogramBase::Sample>(sample),
-                      expected_count);
-  }
-
-  // We don't know the values of the samples, but we know how many there are.
-  // This measures the diff from the snapshot taken when this object was
-  // constructed.
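-  // For example, assuming three samples of any value were logged to a
-  // hypothetical histogram "MyMetric":
-  //   histogram_tester.ExpectTotalCount("MyMetric", 3);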
-  void ExpectTotalCount(const std::string& name,
-                        HistogramBase::Count count) const;
-
-  // We know the exact number of samples for buckets corresponding to a time
-  // interval. Other intervals may have samples too.
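-  // For example, assuming a one-second sample was logged to a hypothetical
-  // timing histogram "MyMetric.Duration":
-  //   histogram_tester.ExpectTimeBucketCount("MyMetric.Duration",
-  //                                          base::TimeDelta::FromSeconds(1),
-  //                                          1);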
-  void ExpectTimeBucketCount(const std::string& name,
-                             TimeDelta sample,
-                             HistogramBase::Count count) const;
-
-  // Returns a list of all of the buckets recorded since creation of this
-  // object, as a vector<Bucket>, where each Bucket holds the min boundary of
-  // the bucket and the count of samples recorded to that bucket since
-  // creation.
-  //
-  // Example usage, using gMock:
-  //   EXPECT_THAT(histogram_tester.GetAllSamples("HistogramName"),
-  //               ElementsAre(Bucket(1, 5), Bucket(2, 10), Bucket(3, 5)));
-  //
-  // If you build the expected list programmatically, you can use ContainerEq:
-  //   EXPECT_THAT(histogram_tester.GetAllSamples("HistogramName"),
-  //               ContainerEq(expected_buckets));
-  //
-  // or EXPECT_EQ if you prefer not to depend on gMock, at the expense of a
-  // slightly less helpful failure message:
-  //   EXPECT_EQ(expected_buckets,
-  //             histogram_tester.GetAllSamples("HistogramName"));
-  std::vector<Bucket> GetAllSamples(const std::string& name) const;
-
-  // Returns the value of the |sample| bucket for the histogram |name|.
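-  // For example, assuming a hypothetical histogram "MyMetric":
-  //   base::HistogramBase::Count count =
-  //       histogram_tester.GetBucketCount("MyMetric", 5);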
-  HistogramBase::Count GetBucketCount(const std::string& name,
-                                      HistogramBase::Sample sample) const;
-
-  // Finds histograms whose names start with |prefix|, and returns them along
-  // with the counts of any samples added since the creation of this object.
-  // Histograms that are unchanged are omitted from the result. The return
-  // value is a map whose keys are histogram names and whose values are sample
-  // counts.
-  //
-  // This is useful for cases where the code under test is choosing among a
-  // family of related histograms and incrementing one of them. Typically you
-  // should pass the result of this function directly to EXPECT_THAT.
-  //
-  // Example usage, using gmock (which produces better failure messages):
-  //   #include "testing/gmock/include/gmock/gmock.h"
-  // ...
-  //   base::HistogramTester::CountsMap expected_counts;
-  //   expected_counts["MyMetric.A"] = 1;
-  //   expected_counts["MyMetric.B"] = 1;
-  //   EXPECT_THAT(histogram_tester.GetTotalCountsForPrefix("MyMetric."),
-  //               testing::ContainerEq(expected_counts));
-  CountsMap GetTotalCountsForPrefix(const std::string& prefix) const;
-
-  // Access a modified HistogramSamples containing only what has been logged
-  // to the histogram since the creation of this object.
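-  // For example, assuming a hypothetical histogram "MyMetric" that received
-  // three samples since this object was constructed:
-  //   std::unique_ptr<base::HistogramSamples> samples =
-  //       histogram_tester.GetHistogramSamplesSinceCreation("MyMetric");
-  //   EXPECT_EQ(3, samples->TotalCount());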
-  std::unique_ptr<HistogramSamples> GetHistogramSamplesSinceCreation(
-      const std::string& histogram_name) const;
-
- private:
-  // Verifies and asserts that the value in the |sample| bucket matches the
-  // |expected_count|. The bucket's current value is determined from |samples|
-  // and is modified based on the snapshot stored for histogram |name|.
-  void CheckBucketCount(const std::string& name,
-                        HistogramBase::Sample sample,
-                        HistogramBase::Count expected_count,
-                        const HistogramSamples& samples) const;
-
-  // Verifies that the total number of values recorded for the histogram |name|
-  // is |expected_count|. This is checked against |samples| minus the snapshot
-  // that was taken for |name|.
-  void CheckTotalCount(const std::string& name,
-                       HistogramBase::Count expected_count,
-                       const HistogramSamples& samples) const;
-
-  // Sets the value for |count| to be the value in the |sample| bucket. The
-  // bucket's current value is determined from |samples| and is modified based
-  // on the snapshot stored for histogram |name|.
-  void GetBucketCountForSamples(const std::string& name,
-                                HistogramBase::Sample sample,
-                                const HistogramSamples& samples,
-                                HistogramBase::Count* count) const;
-
-  // Used to determine the histogram changes made during this instance's
-  // lifetime.
-  std::map<std::string, std::unique_ptr<HistogramSamples>> histograms_snapshot_;
-
-  DISALLOW_COPY_AND_ASSIGN(HistogramTester);
-};
-
-struct Bucket {
-  Bucket(HistogramBase::Sample min, HistogramBase::Count count)
-      : min(min), count(count) {}
-
-  bool operator==(const Bucket& other) const;
-
-  HistogramBase::Sample min;
-  HistogramBase::Count count;
-};
-
-void PrintTo(const Bucket& value, std::ostream* os);
-
-}  // namespace base
-
-#endif  // BASE_TEST_HISTOGRAM_TESTER_H_
diff --git a/base/threading/platform_thread_posix.cc b/base/threading/platform_thread_posix.cc
index a5ddb2e..c3a071b 100644
--- a/base/threading/platform_thread_posix.cc
+++ b/base/threading/platform_thread_posix.cc
@@ -15,7 +15,6 @@
 
 #include <memory>
 
-#include "base/debug/activity_tracker.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/threading/platform_thread_internal_posix.h"
@@ -220,9 +219,6 @@
 
 // static
 void PlatformThread::Join(PlatformThreadHandle thread_handle) {
-  // Record the event that this thread is blocking upon (for hang diagnosis).
-  base::debug::ScopedThreadJoinActivity thread_activity(&thread_handle);
-
   // Joining another thread may block the current thread for a long time, since
   // the thread referred to by |thread_handle| may still be running long-lived /
   // blocking tasks.
diff --git a/build/gen.py b/build/gen.py
index 988cc58..37592d4 100755
--- a/build/gen.py
+++ b/build/gen.py
@@ -237,7 +237,6 @@
         'base/callback_helpers.cc',
         'base/callback_internal.cc',
         'base/command_line.cc',
-        'base/debug/activity_tracker.cc',
         'base/debug/alias.cc',
         'base/debug/crash_logging.cc',
         'base/debug/dump_without_crashing.cc',
@@ -245,14 +244,12 @@
         'base/debug/task_annotator.cc',
         'base/debug/thread_heap_usage_tracker.cc',
         'base/environment.cc',
-        'base/feature_list.cc',
         'base/files/file.cc',
         'base/files/file_enumerator.cc',
         'base/files/file_path.cc',
         'base/files/file_path_constants.cc',
         'base/files/file_tracing.cc',
         'base/files/file_util.cc',
-        'base/files/important_file_writer.cc',
         'base/files/memory_mapped_file.cc',
         'base/files/scoped_file.cc',
         'base/files/scoped_temp_dir.cc',
@@ -281,24 +278,6 @@
         'base/message_loop/message_pump.cc',
         'base/message_loop/message_pump_default.cc',
         'base/message_loop/watchable_io_message_pump_posix.cc',
-        'base/metrics/bucket_ranges.cc',
-        'base/metrics/dummy_histogram.cc',
-        'base/metrics/field_trial.cc',
-        'base/metrics/field_trial_param_associator.cc',
-        'base/metrics/field_trial_params.cc',
-        'base/metrics/histogram.cc',
-        'base/metrics/histogram_base.cc',
-        'base/metrics/histogram_functions.cc',
-        'base/metrics/histogram_samples.cc',
-        'base/metrics/histogram_snapshot_manager.cc',
-        'base/metrics/metrics_hashes.cc',
-        'base/metrics/persistent_histogram_allocator.cc',
-        'base/metrics/persistent_memory_allocator.cc',
-        'base/metrics/persistent_sample_map.cc',
-        'base/metrics/sample_map.cc',
-        'base/metrics/sample_vector.cc',
-        'base/metrics/sparse_histogram.cc',
-        'base/metrics/statistics_recorder.cc',
         'base/observer_list_threadsafe.cc',
         'base/path_service.cc',
         'base/pending_task.cc',