Remove USE_ALLOCATOR_SHIM and ENABLE_MUTEX_PRIORITY_INHERITANCE buildflags

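This removes the includes of the generated "base/allocator/buildflags.h" and
"base/synchronization/synchronization_buildflags.h" headers, the code paths
guarded by BUILDFLAG(USE_ALLOCATOR_SHIM) and
BUILDFLAG(ENABLE_MUTEX_PRIORITY_INHERITANCE), and the
write_buildflag_header_manually() calls in tools/gn/bootstrap/bootstrap.py
that emitted those headers.

For context, a minimal sketch of the pattern being removed (assuming
Chromium's build/buildflag.h, which supplies the BUILDFLAG() macro; the
helper function below is purely illustrative and not part of this change):

  // buildflags.h is generated by the build; BUILDFLAG(<FLAG>) expands to
  // 0 or 1 at preprocessing time based on the generated definitions.
  #include "base/allocator/buildflags.h"

  bool UsingAllocatorShim() {
  #if BUILDFLAG(USE_ALLOCATOR_SHIM)
    return true;   // shim-specific paths were compiled in
  #else
    return false;  // the fallback paths this change keeps
  #endif
  }
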
Change-Id: Id337909c705ee4bc06078e8144ba338b94e467b5
Reviewed-on: https://gn-review.googlesource.com/1121
Reviewed-by: Petr Hosek <phosek@google.com>
Commit-Queue: Scott Graham <scottmg@chromium.org>

diff --git a/base/allocator/allocator_check.cc b/base/allocator/allocator_check.cc
index 5fb8646..f5e4edc 100644
--- a/base/allocator/allocator_check.cc
+++ b/base/allocator/allocator_check.cc
@@ -4,7 +4,6 @@
 
 #include "base/allocator/allocator_check.h"
 
-#include "base/allocator/buildflags.h"
 #include "build/build_config.h"
 
 #if defined(OS_WIN)
@@ -23,11 +22,7 @@
 namespace allocator {
 
 bool IsAllocatorInitialized() {
-#if defined(OS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
-  // Set by allocator_shim_override_ucrt_symbols_win.h when the
-  // shimmed _set_new_mode() is called.
-  return g_is_win_shim_layer_initialized;
-#elif defined(OS_LINUX) && defined(USE_TCMALLOC) && \
+#if defined(OS_LINUX) && defined(USE_TCMALLOC) && \
     !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
 // From third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h.
 // TODO(primiano): replace with an include once base can depend on allocator.
diff --git a/base/allocator/allocator_interception_mac.mm b/base/allocator/allocator_interception_mac.mm
index 5020287..dce7eda 100644
--- a/base/allocator/allocator_interception_mac.mm
+++ b/base/allocator/allocator_interception_mac.mm
@@ -27,7 +27,6 @@
 
 #include <new>
 
-#include "base/allocator/buildflags.h"
 #include "base/allocator/malloc_zone_functions_mac.h"
 #include "base/bind.h"
 #include "base/logging.h"
diff --git a/base/allocator/allocator_shim_unittest.cc b/base/allocator/allocator_shim_unittest.cc
index 3be8f2c..73ff031 100644
--- a/base/allocator/allocator_shim_unittest.cc
+++ b/base/allocator/allocator_shim_unittest.cc
@@ -11,7 +11,6 @@
 #include <new>
 #include <vector>
 
-#include "base/allocator/buildflags.h"
 #include "base/allocator/partition_allocator/partition_alloc.h"
 #include "base/atomicops.h"
 #include "base/process/process_metrics.h"
@@ -456,12 +455,6 @@
   ASSERT_EQ(kNumThreads, GetNumberOfNewHandlerCalls());
 }
 
-#if defined(OS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
-TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) {
-  ASSERT_NE(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle()));
-}
-#endif  // defined(OS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
-
 }  // namespace
 }  // namespace allocator
 }  // namespace base
diff --git a/base/debug/thread_heap_usage_tracker.cc b/base/debug/thread_heap_usage_tracker.cc
index 6d00b1c..f9852db 100644
--- a/base/debug/thread_heap_usage_tracker.cc
+++ b/base/debug/thread_heap_usage_tracker.cc
@@ -11,7 +11,6 @@
 #include <type_traits>
 
 #include "base/allocator/allocator_shim.h"
-#include "base/allocator/buildflags.h"
 #include "base/logging.h"
 #include "base/no_destructor.h"
 #include "base/threading/thread_local_storage.h"
@@ -306,11 +305,7 @@
 
   CHECK_EQ(false, g_heap_tracking_enabled) << "No double-enabling.";
   g_heap_tracking_enabled = true;
-#if BUILDFLAG(USE_ALLOCATOR_SHIM)
-  base::allocator::InsertAllocatorDispatch(&allocator_dispatch);
-#else
   CHECK(false) << "Can't enable heap tracking without the shim.";
-#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
 }
 
 bool ThreadHeapUsageTracker::IsHeapTrackingEnabled() {
@@ -318,11 +313,7 @@
 }
 
 void ThreadHeapUsageTracker::DisableHeapTrackingForTesting() {
-#if BUILDFLAG(USE_ALLOCATOR_SHIM)
-  base::allocator::RemoveAllocatorDispatchForTesting(&allocator_dispatch);
-#else
   CHECK(false) << "Can't disable heap tracking without the shim.";
-#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
   DCHECK_EQ(true, g_heap_tracking_enabled) << "Heap tracking not enabled.";
   g_heap_tracking_enabled = false;
 }
diff --git a/base/debug/thread_heap_usage_tracker.h b/base/debug/thread_heap_usage_tracker.h
index eb03b3f..89166d0 100644
--- a/base/debug/thread_heap_usage_tracker.h
+++ b/base/debug/thread_heap_usage_tracker.h
@@ -7,7 +7,6 @@
 
 #include <stdint.h>
 
-#include "base/allocator/buildflags.h"
 #include "base/base_export.h"
 #include "base/threading/thread_checker.h"
 
@@ -114,4 +113,4 @@
 }  // namespace debug
 }  // namespace base
 
-#endif  // BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
\ No newline at end of file
+#endif  // BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
diff --git a/base/debug/thread_heap_usage_tracker_unittest.cc b/base/debug/thread_heap_usage_tracker_unittest.cc
index b99576c..fc7fda4 100644
--- a/base/debug/thread_heap_usage_tracker_unittest.cc
+++ b/base/debug/thread_heap_usage_tracker_unittest.cc
@@ -7,7 +7,6 @@
 #include <map>
 
 #include "base/allocator/allocator_shim.h"
-#include "base/allocator/buildflags.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 #if defined(OS_MACOSX)
@@ -553,55 +552,5 @@
   MockFree(alloc);
 }
 
-#if BUILDFLAG(USE_ALLOCATOR_SHIM)
-class ThreadHeapUsageShimTest : public testing::Test {
-#if defined(OS_MACOSX)
-  void SetUp() override { allocator::InitializeAllocatorShim(); }
-  void TearDown() override { allocator::UninterceptMallocZonesForTesting(); }
-#endif
-};
-
-TEST_F(ThreadHeapUsageShimTest, HooksIntoMallocWhenShimAvailable) {
-  ASSERT_FALSE(ThreadHeapUsageTracker::IsHeapTrackingEnabled());
-
-  ThreadHeapUsageTracker::EnableHeapTracking();
-
-  ASSERT_TRUE(ThreadHeapUsageTracker::IsHeapTrackingEnabled());
-
-  const size_t kAllocSize = 9993;
-  // This test verifies that the scoped heap data is affected by malloc &
-  // free only when the shim is available.
-  ThreadHeapUsageTracker usage_tracker;
-  usage_tracker.Start();
-
-  ThreadHeapUsage u1 = ThreadHeapUsageTracker::GetUsageSnapshot();
-  void* ptr = malloc(kAllocSize);
-  // Prevent the compiler from optimizing out the malloc/free pair.
-  ASSERT_NE(nullptr, ptr);
-
-  ThreadHeapUsage u2 = ThreadHeapUsageTracker::GetUsageSnapshot();
-  free(ptr);
-
-  usage_tracker.Stop(false);
-  ThreadHeapUsage u3 = usage_tracker.usage();
-
-  // Verify that at least one allocation operation was recorded, and that free
-  // operations are at least monotonically growing.
-  EXPECT_LE(0U, u1.alloc_ops);
-  EXPECT_LE(u1.alloc_ops + 1, u2.alloc_ops);
-  EXPECT_LE(u1.alloc_ops + 1, u3.alloc_ops);
-
-  // Verify that at least the bytes above were recorded.
-  EXPECT_LE(u1.alloc_bytes + kAllocSize, u2.alloc_bytes);
-
-  // Verify that at least the one free operation above was recorded.
-  EXPECT_LE(u2.free_ops + 1, u3.free_ops);
-
-  TestingThreadHeapUsageTracker::DisableHeapTrackingForTesting();
-
-  ASSERT_FALSE(ThreadHeapUsageTracker::IsHeapTrackingEnabled());
-}
-#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
-
 }  // namespace debug
 }  // namespace base
diff --git a/base/process/memory_linux.cc b/base/process/memory_linux.cc
index 21b2069..ffd8757 100644
--- a/base/process/memory_linux.cc
+++ b/base/process/memory_linux.cc
@@ -9,7 +9,6 @@
 #include <new>
 
 #include "base/allocator/allocator_shim.h"
-#include "base/allocator/buildflags.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
@@ -52,9 +51,7 @@
   // If we're using glibc's allocator, the above functions will override
   // malloc and friends and make them die on out of memory.
 
-#if BUILDFLAG(USE_ALLOCATOR_SHIM)
-  allocator::SetCallNewHandlerOnMallocFailure(true);
-#elif defined(USE_TCMALLOC)
+#if defined(USE_TCMALLOC)
   // For tcmalloc, we need to tell it to behave like new.
   tc_set_new_mode(1);
 #endif
@@ -98,9 +95,7 @@
 }
 
 bool UncheckedMalloc(size_t size, void** result) {
-#if BUILDFLAG(USE_ALLOCATOR_SHIM)
-  *result = allocator::UncheckedAlloc(size);
-#elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR) || \
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) || \
     (!defined(LIBC_GLIBC) && !defined(USE_TCMALLOC))
   *result = malloc(size);
 #elif defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
diff --git a/base/process/memory_mac.mm b/base/process/memory_mac.mm
index 5b8cd13..6cf380f 100644
--- a/base/process/memory_mac.mm
+++ b/base/process/memory_mac.mm
@@ -6,7 +6,6 @@
 
 #include "base/allocator/allocator_interception_mac.h"
 #include "base/allocator/allocator_shim.h"
-#include "base/allocator/buildflags.h"
 #include "build/build_config.h"
 
 namespace base {
@@ -35,13 +34,7 @@
   // Step 1: Enable OOM killer on C++ failures.
   std::set_new_handler(oom_killer_new);
 
-// Step 2: Enable OOM killer on C-malloc failures for the default zone (if we
-// have a shim).
-#if BUILDFLAG(USE_ALLOCATOR_SHIM)
-  allocator::SetCallNewHandlerOnMallocFailure(true);
-#endif
-
-  // Step 3: Enable OOM killer on all other malloc zones (or just "all" without
+  // Step 2: Enable OOM killer on all other malloc zones (or just "all" without
   // "other" if shim is disabled).
   allocator::InterceptAllocationsMac();
 }
diff --git a/base/process/memory_unittest.cc b/base/process/memory_unittest.cc
index 835cf7e..41c078d 100644
--- a/base/process/memory_unittest.cc
+++ b/base/process/memory_unittest.cc
@@ -11,7 +11,6 @@
 #include <limits>
 
 #include "base/allocator/allocator_check.h"
-#include "base/allocator/buildflags.h"
 #include "base/compiler_specific.h"
 #include "base/debug/alias.h"
 #include "base/memory/aligned_memory.h"
@@ -61,9 +60,6 @@
 // will fail.
 
 TEST(ProcessMemoryTest, MacTerminateOnHeapCorruption) {
-#if BUILDFLAG(USE_ALLOCATOR_SHIM)
-  base::allocator::InitializeAllocatorShim();
-#endif
   // Assert that freeing an unallocated pointer will crash the process.
   char buf[9];
   asm("" : "=r" (buf));  // Prevent clang from being too smart.
@@ -79,19 +75,12 @@
 #else
   ADD_FAILURE() << "This test is not supported in this build configuration.";
 #endif
-
-#if BUILDFLAG(USE_ALLOCATOR_SHIM)
-  base::allocator::UninterceptMallocZonesForTesting();
-#endif
 }
 
 #endif  // defined(OS_MACOSX)
 
 TEST(MemoryTest, AllocatorShimWorking) {
 #if defined(OS_MACOSX)
-#if BUILDFLAG(USE_ALLOCATOR_SHIM)
-  base::allocator::InitializeAllocatorShim();
-#endif
   base::allocator::InterceptAllocationsMac();
 #endif
   ASSERT_TRUE(base::allocator::IsAllocatorInitialized());
@@ -100,434 +89,3 @@
   base::allocator::UninterceptMallocZonesForTesting();
 #endif
 }
-
-// OpenBSD does not support these tests. Don't test these on ASan/TSan/MSan
-// configurations: only test the real allocator.
-// Windows only supports these tests with the allocator shim in place.
-#if !defined(OS_OPENBSD) && BUILDFLAG(USE_ALLOCATOR_SHIM) && \
-    !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-
-namespace {
-#if defined(OS_WIN)
-// Windows raises an exception rather than using LOG(FATAL) in order to make the
-// exit code unique to OOM.
-const char* kOomRegex = "";
-const int kExitCode = base::win::kOomExceptionCode;
-#else
-const char* kOomRegex = "Out of memory";
-const int kExitCode = 1;
-#endif
-}  // namespace
-
-class OutOfMemoryTest : public testing::Test {
- public:
-  OutOfMemoryTest()
-      : value_(nullptr),
-        // Make test size as large as possible minus a few pages so
-        // that alignment or other rounding doesn't make it wrap.
-        test_size_(std::numeric_limits<std::size_t>::max() - 12 * 1024),
-        // A test size that is > 2Gb and will cause the allocators to reject
-        // the allocation due to security restrictions. See crbug.com/169327.
-        insecure_test_size_(std::numeric_limits<int>::max()),
-        signed_test_size_(std::numeric_limits<ssize_t>::max()) {}
-
- protected:
-  void* value_;
-  size_t test_size_;
-  size_t insecure_test_size_;
-  ssize_t signed_test_size_;
-};
-
-class OutOfMemoryDeathTest : public OutOfMemoryTest {
- public:
-  void SetUpInDeathAssert() {
-#if defined(OS_MACOSX) && BUILDFLAG(USE_ALLOCATOR_SHIM)
-    base::allocator::InitializeAllocatorShim();
-#endif
-
-    // Must call EnableTerminationOnOutOfMemory() because that is called from
-    // chrome's main function and therefore hasn't been called yet.
-    // Since this call may result in another thread being created and death
-    // tests shouldn't be started in a multithread environment, this call
-    // should be done inside of the ASSERT_DEATH.
-    base::EnableTerminationOnOutOfMemory();
-  }
-
-#if defined(OS_MACOSX)
-  void TearDown() override {
-    base::allocator::UninterceptMallocZonesForTesting();
-  }
-#endif
-};
-
-TEST_F(OutOfMemoryDeathTest, New) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = operator new(test_size_);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, NewArray) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = new char[test_size_];
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, Malloc) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = malloc(test_size_);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, Realloc) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = realloc(nullptr, test_size_);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, Calloc) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = calloc(1024, test_size_ / 1024L);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, AlignedAlloc) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = base::AlignedAlloc(test_size_, 8);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-// POSIX does not define an aligned realloc function.
-#if defined(OS_WIN)
-TEST_F(OutOfMemoryDeathTest, AlignedRealloc) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = _aligned_realloc(NULL, test_size_, 8);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-namespace {
-
-constexpr uint32_t kUnhandledExceptionExitCode = 0xBADA55;
-
-// This unhandled exception filter exits the process with an exit code distinct
-// from the exception code. This is to verify that the out of memory new handler
-// causes an unhandled exception.
-LONG WINAPI ExitingUnhandledExceptionFilter(EXCEPTION_POINTERS* ExceptionInfo) {
-  _exit(kUnhandledExceptionExitCode);
-}
-
-}  // namespace
-
-TEST_F(OutOfMemoryDeathTest, NewHandlerGeneratesUnhandledException) {
-  ASSERT_EXIT(
-      {
-        SetUpInDeathAssert();
-        SetUnhandledExceptionFilter(&ExitingUnhandledExceptionFilter);
-        value_ = new char[test_size_];
-      },
-      testing::ExitedWithCode(kUnhandledExceptionExitCode), kOomRegex);
-}
-#endif  // defined(OS_WIN)
-
-// OS X and Android have no 2Gb allocation limit.
-// See https://crbug.com/169327.
-#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
-TEST_F(OutOfMemoryDeathTest, SecurityNew) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = operator new(insecure_test_size_);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, SecurityNewArray) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = new char[insecure_test_size_];
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, SecurityMalloc) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = malloc(insecure_test_size_);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, SecurityRealloc) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = realloc(nullptr, insecure_test_size_);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, SecurityCalloc) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = calloc(1024, insecure_test_size_ / 1024L);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, SecurityAlignedAlloc) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = base::AlignedAlloc(insecure_test_size_, 8);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-
-// POSIX does not define an aligned realloc function.
-#if defined(OS_WIN)
-TEST_F(OutOfMemoryDeathTest, SecurityAlignedRealloc) {
-  ASSERT_EXIT({
-      SetUpInDeathAssert();
-      value_ = _aligned_realloc(NULL, insecure_test_size_, 8);
-    }, testing::ExitedWithCode(kExitCode), kOomRegex);
-}
-#endif  // defined(OS_WIN)
-#endif  // !defined(OS_MACOSX) && !defined(OS_ANDROID)
-
-#if defined(OS_LINUX)
-
-TEST_F(OutOfMemoryDeathTest, Valloc) {
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      value_ = valloc(test_size_);
-    }, kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, SecurityValloc) {
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      value_ = valloc(insecure_test_size_);
-    }, kOomRegex);
-}
-
-#if PVALLOC_AVAILABLE == 1
-TEST_F(OutOfMemoryDeathTest, Pvalloc) {
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      value_ = pvalloc(test_size_);
-    }, kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, SecurityPvalloc) {
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      value_ = pvalloc(insecure_test_size_);
-    }, kOomRegex);
-}
-#endif  // PVALLOC_AVAILABLE == 1
-
-TEST_F(OutOfMemoryDeathTest, Memalign) {
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      value_ = memalign(4, test_size_);
-    }, kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, ViaSharedLibraries) {
-  // This tests that the run-time symbol resolution is overriding malloc for
-  // shared libraries as well as for our code.
-  ASSERT_DEATH({
-    SetUpInDeathAssert();
-    value_ = MallocWrapper(test_size_);
-  }, kOomRegex);
-}
-#endif  // OS_LINUX
-
-// Android doesn't implement posix_memalign().
-#if defined(OS_POSIX) && !defined(OS_ANDROID)
-TEST_F(OutOfMemoryDeathTest, Posix_memalign) {
-  // Grab the return value of posix_memalign to silence a compiler warning
-  // about unused return values. We don't actually care about the return
-  // value, since we're asserting death.
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      EXPECT_EQ(ENOMEM, posix_memalign(&value_, 8, test_size_));
-    }, kOomRegex);
-}
-#endif  // defined(OS_POSIX) && !defined(OS_ANDROID)
-
-#if defined(OS_MACOSX)
-
-// Purgeable zone tests
-
-TEST_F(OutOfMemoryDeathTest, MallocPurgeable) {
-  malloc_zone_t* zone = malloc_default_purgeable_zone();
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      value_ = malloc_zone_malloc(zone, test_size_);
-    }, kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, ReallocPurgeable) {
-  malloc_zone_t* zone = malloc_default_purgeable_zone();
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      value_ = malloc_zone_realloc(zone, NULL, test_size_);
-    }, kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, CallocPurgeable) {
-  malloc_zone_t* zone = malloc_default_purgeable_zone();
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      value_ = malloc_zone_calloc(zone, 1024, test_size_ / 1024L);
-    }, kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, VallocPurgeable) {
-  malloc_zone_t* zone = malloc_default_purgeable_zone();
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      value_ = malloc_zone_valloc(zone, test_size_);
-    }, kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, PosixMemalignPurgeable) {
-  malloc_zone_t* zone = malloc_default_purgeable_zone();
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      value_ = malloc_zone_memalign(zone, 8, test_size_);
-    }, kOomRegex);
-}
-
-// Since these allocation functions take a signed size, it's possible that
-// calling them just once won't be enough to exhaust memory. In the 32-bit
-// environment, it's likely that these allocation attempts will fail because
-// not enough contiguous address space is available. In the 64-bit environment,
-// it's likely that they'll fail because they would require a preposterous
-// amount of (virtual) memory.
-
-TEST_F(OutOfMemoryDeathTest, CFAllocatorSystemDefault) {
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      while ((value_ =
-              base::AllocateViaCFAllocatorSystemDefault(signed_test_size_))) {}
-    }, kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, CFAllocatorMalloc) {
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      while ((value_ =
-              base::AllocateViaCFAllocatorMalloc(signed_test_size_))) {}
-    }, kOomRegex);
-}
-
-TEST_F(OutOfMemoryDeathTest, CFAllocatorMallocZone) {
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      while ((value_ =
-              base::AllocateViaCFAllocatorMallocZone(signed_test_size_))) {}
-    }, kOomRegex);
-}
-
-#if !defined(ARCH_CPU_64_BITS)
-
-// See process_util_unittest_mac.mm for an explanation of why this test isn't
-// run in the 64-bit environment.
-
-TEST_F(OutOfMemoryDeathTest, PsychoticallyBigObjCObject) {
-  ASSERT_DEATH({
-      SetUpInDeathAssert();
-      while ((value_ = base::AllocatePsychoticallyBigObjCObject())) {}
-    }, kOomRegex);
-}
-
-#endif  // !ARCH_CPU_64_BITS
-#endif  // OS_MACOSX
-
-class OutOfMemoryHandledTest : public OutOfMemoryTest {
- public:
-  static const size_t kSafeMallocSize = 512;
-  static const size_t kSafeCallocSize = 128;
-  static const size_t kSafeCallocItems = 4;
-
-  void SetUp() override {
-    OutOfMemoryTest::SetUp();
-
-    // We enable termination on OOM - just as Chrome does at early
-    // initialization - and test that UncheckedMalloc and  UncheckedCalloc
-    // properly by-pass this in order to allow the caller to handle OOM.
-    base::EnableTerminationOnOutOfMemory();
-  }
-
-  void TearDown() override {
-#if defined(OS_MACOSX)
-    base::allocator::UninterceptMallocZonesForTesting();
-#endif
-  }
-};
-
-#if defined(OS_WIN)
-
-namespace {
-
-DWORD HandleOutOfMemoryException(EXCEPTION_POINTERS* exception_ptrs,
-                                 size_t expected_size) {
-  EXPECT_EQ(base::win::kOomExceptionCode,
-            exception_ptrs->ExceptionRecord->ExceptionCode);
-  EXPECT_LE(1U, exception_ptrs->ExceptionRecord->NumberParameters);
-  EXPECT_EQ(expected_size,
-            exception_ptrs->ExceptionRecord->ExceptionInformation[0]);
-  return EXCEPTION_EXECUTE_HANDLER;
-}
-
-}  // namespace
-
-TEST_F(OutOfMemoryTest, TerminateBecauseOutOfMemoryReportsAllocSize) {
-// On Windows, TerminateBecauseOutOfMemory reports the attempted allocation
-// size in the exception raised.
-#if defined(ARCH_CPU_64_BITS)
-  // Test with a size larger than 32 bits on 64 bit machines.
-  const size_t kAttemptedAllocationSize = 0xBADA55F00DULL;
-#else
-  const size_t kAttemptedAllocationSize = 0xBADA55;
-#endif
-
-  __try {
-    base::TerminateBecauseOutOfMemory(kAttemptedAllocationSize);
-  } __except (HandleOutOfMemoryException(GetExceptionInformation(),
-                                         kAttemptedAllocationSize)) {
-  }
-}
-#endif  // OS_WIN
-
-// TODO(b.kelemen): make UncheckedMalloc and UncheckedCalloc work
-// on Windows as well.
-TEST_F(OutOfMemoryHandledTest, UncheckedMalloc) {
-  EXPECT_TRUE(base::UncheckedMalloc(kSafeMallocSize, &value_));
-  EXPECT_TRUE(value_ != nullptr);
-  free(value_);
-
-  EXPECT_FALSE(base::UncheckedMalloc(test_size_, &value_));
-  EXPECT_TRUE(value_ == nullptr);
-}
-
-TEST_F(OutOfMemoryHandledTest, UncheckedCalloc) {
-  EXPECT_TRUE(base::UncheckedCalloc(1, kSafeMallocSize, &value_));
-  EXPECT_TRUE(value_ != nullptr);
-  const char* bytes = static_cast<const char*>(value_);
-  for (size_t i = 0; i < kSafeMallocSize; ++i)
-    EXPECT_EQ(0, bytes[i]);
-  free(value_);
-
-  EXPECT_TRUE(
-      base::UncheckedCalloc(kSafeCallocItems, kSafeCallocSize, &value_));
-  EXPECT_TRUE(value_ != nullptr);
-  bytes = static_cast<const char*>(value_);
-  for (size_t i = 0; i < (kSafeCallocItems * kSafeCallocSize); ++i)
-    EXPECT_EQ(0, bytes[i]);
-  free(value_);
-
-  EXPECT_FALSE(base::UncheckedCalloc(1, test_size_, &value_));
-  EXPECT_TRUE(value_ == nullptr);
-}
-#endif  // !defined(OS_OPENBSD) && BUILDFLAG(ENABLE_WIN_ALLOCATOR_SHIM_TESTS) &&
-        // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/sampling_heap_profiler/sampling_heap_profiler.cc b/base/sampling_heap_profiler/sampling_heap_profiler.cc
index 3d7424b..94383a2 100644
--- a/base/sampling_heap_profiler/sampling_heap_profiler.cc
+++ b/base/sampling_heap_profiler/sampling_heap_profiler.cc
@@ -9,7 +9,6 @@
 #include <utility>
 
 #include "base/allocator/allocator_shim.h"
-#include "base/allocator/buildflags.h"
 #include "base/allocator/partition_allocator/partition_alloc.h"
 #include "base/atomicops.h"
 #include "base/debug/stack_trace.h"
@@ -199,13 +198,9 @@
 
 // static
 bool SamplingHeapProfiler::InstallAllocatorHooks() {
-#if BUILDFLAG(USE_ALLOCATOR_SHIM)
-  base::allocator::InsertAllocatorDispatch(&g_allocator_dispatch);
-#else
   ignore_result(g_allocator_dispatch);
   DLOG(WARNING)
       << "base::allocator shims are not available for memory sampling.";
-#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
 
 #if BUILDFLAG(USE_PARTITION_ALLOC) && !defined(OS_NACL)
   base::PartitionAllocHooks::SetAllocationHook(&PartitionAllocHook);
diff --git a/base/security_unittest.cc b/base/security_unittest.cc
index 13e9594..3073299 100644
--- a/base/security_unittest.cc
+++ b/base/security_unittest.cc
@@ -14,7 +14,6 @@
 #include <limits>
 #include <memory>
 
-#include "base/allocator/buildflags.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/memory/free_deleter.h"
diff --git a/base/synchronization/lock_impl_posix.cc b/base/synchronization/lock_impl_posix.cc
index 7571f68..392c53c 100644
--- a/base/synchronization/lock_impl_posix.cc
+++ b/base/synchronization/lock_impl_posix.cc
@@ -11,7 +11,6 @@
 #include "base/posix/safe_strerror.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/lock.h"
-#include "base/synchronization/synchronization_buildflags.h"
 #include "build/build_config.h"
 
 namespace base {
@@ -105,9 +104,7 @@
 
 // static
 bool LockImpl::PriorityInheritanceAvailable() {
-#if BUILDFLAG(ENABLE_MUTEX_PRIORITY_INHERITANCE)
-  return true;
-#elif PRIORITY_INHERITANCE_LOCKS_POSSIBLE() && defined(OS_MACOSX)
+#if PRIORITY_INHERITANCE_LOCKS_POSSIBLE() && defined(OS_MACOSX)
   return true;
 #else
   // Security concerns prevent the use of priority inheritance mutexes on Linux.
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index 46fdb3e..975e6eb 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -9,7 +9,6 @@
 #include <unordered_map>
 
 #include "base/allocator/allocator_extension.h"
-#include "base/allocator/buildflags.h"
 #include "base/debug/profiler.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_event_argument.h"
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index f6cc832..54c93a2 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -11,7 +11,6 @@
 #include <memory>
 #include <utility>
 
-#include "base/allocator/buildflags.h"
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/debug/alias.h"
@@ -112,40 +111,6 @@
          (mode == kHeapProfilingModeBackground);
 }
 
-#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
-inline bool IsHeapProfilingModeEnabled(HeapProfilingMode mode) {
-  return mode != kHeapProfilingModeDisabled &&
-         mode != kHeapProfilingModeInvalid;
-}
-
-void EnableFilteringForPseudoStackProfiling() {
-  if (AllocationContextTracker::capture_mode() !=
-          AllocationContextTracker::CaptureMode::PSEUDO_STACK ||
-      (TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
-    return;
-  }
-  // Create trace config with heap profiling filter.
-  std::string filter_string = JoinString(
-      {"*", TRACE_DISABLED_BY_DEFAULT("net"), TRACE_DISABLED_BY_DEFAULT("cc"),
-       MemoryDumpManager::kTraceCategory},
-      ",");
-  TraceConfigCategoryFilter category_filter;
-  category_filter.InitializeFromString(filter_string);
-
-  TraceConfig::EventFilterConfig heap_profiler_filter_config(
-      HeapProfilerEventFilter::kName);
-  heap_profiler_filter_config.SetCategoryFilter(category_filter);
-
-  TraceConfig::EventFilters filters;
-  filters.push_back(heap_profiler_filter_config);
-  TraceConfig filtering_trace_config;
-  filtering_trace_config.SetEventFilters(filters);
-
-  TraceLog::GetInstance()->SetEnabled(filtering_trace_config,
-                                      TraceLog::FILTERING_MODE);
-}
-#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
-
 }  // namespace
 
 // static
@@ -208,84 +173,8 @@
 
 bool MemoryDumpManager::EnableHeapProfiling(HeapProfilingMode profiling_mode) {
   AutoLock lock(lock_);
-#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
-  bool notify_mdps = true;
-
-  if (heap_profiling_mode_ == kHeapProfilingModeInvalid)
-    return false;  // Disabled permanently.
-
-  if (IsHeapProfilingModeEnabled(heap_profiling_mode_) ==
-      IsHeapProfilingModeEnabled(profiling_mode)) {
-    if (profiling_mode == kHeapProfilingModeDisabled)
-      heap_profiling_mode_ = kHeapProfilingModeInvalid;  // Disable permanently.
-    return false;
-  }
-
-  switch (profiling_mode) {
-    case kHeapProfilingModeTaskProfiler:
-      if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
-        base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
-      notify_mdps = false;
-      break;
-
-    case kHeapProfilingModeBackground:
-      AllocationContextTracker::SetCaptureMode(
-          AllocationContextTracker::CaptureMode::MIXED_STACK);
-      break;
-
-    case kHeapProfilingModePseudo:
-      AllocationContextTracker::SetCaptureMode(
-          AllocationContextTracker::CaptureMode::PSEUDO_STACK);
-      EnableFilteringForPseudoStackProfiling();
-      break;
-
-    case kHeapProfilingModeNative:
-#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
-    {
-      bool can_unwind = CFIBacktraceAndroid::GetInitializedInstance()
-                            ->can_unwind_stack_frames();
-      DCHECK(can_unwind);
-    }
-#endif
-      // If we don't have frame pointers and unwind tables then native tracing
-      // falls-back to using base::debug::StackTrace, which may be slow.
-      AllocationContextTracker::SetCaptureMode(
-          AllocationContextTracker::CaptureMode::NATIVE_STACK);
-      break;
-
-    case kHeapProfilingModeDisabled:
-      if (heap_profiling_mode_ == kHeapProfilingModeTaskProfiler) {
-        LOG(ERROR) << "ThreadHeapUsageTracker cannot be disabled.";
-        return false;
-      }
-      if (heap_profiling_mode_ == kHeapProfilingModePseudo)
-        TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
-      AllocationContextTracker::SetCaptureMode(
-          AllocationContextTracker::CaptureMode::DISABLED);
-      heap_profiling_mode_ = kHeapProfilingModeInvalid;  // Disable permanently.
-      break;
-
-    default:
-      NOTREACHED() << "Incorrect heap profiling mode " << profiling_mode;
-      return false;
-  }
-
-  if (heap_profiling_mode_ != kHeapProfilingModeInvalid)
-    heap_profiling_mode_ = profiling_mode;
-
-  // In case tracing was already enabled, setup the serialization state before
-  // notifying mdps.
-  InitializeHeapProfilerStateIfNeededLocked();
-  if (notify_mdps) {
-    bool enabled = IsHeapProfilingModeEnabled(heap_profiling_mode_);
-    for (const auto& mdpinfo : dump_providers_)
-      NotifyHeapProfilingEnabledLocked(mdpinfo, enabled);
-  }
-  return true;
-#else
   heap_profiling_mode_ = kHeapProfilingModeInvalid;
   return false;
-#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
 }
 
 HeapProfilingMode MemoryDumpManager::GetHeapProfilingMode() {
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index e92045e..2cd6977 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -10,7 +10,6 @@
 #include <utility>
 #include <vector>
 
-#include "base/allocator/buildflags.h"
 #include "base/base_switches.h"
 #include "base/callback.h"
 #include "base/command_line.h"
@@ -826,141 +825,6 @@
   DisableTracing();
 }
 
-#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
-TEST_F(MemoryDumpManagerTest, EnableHeapProfilingPseudoStack) {
-  MockMemoryDumpProvider mdp1;
-  MockMemoryDumpProvider mdp2;
-  MockMemoryDumpProvider mdp3;
-  MemoryDumpProvider::Options supported_options;
-  supported_options.supports_heap_profiling = true;
-  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), supported_options);
-  {
-    testing::InSequence sequence;
-    EXPECT_CALL(mdp1, OnHeapProfilingEnabled(true)).Times(1);
-    EXPECT_CALL(mdp1, OnHeapProfilingEnabled(false)).Times(1);
-  }
-  {
-    testing::InSequence sequence;
-    EXPECT_CALL(mdp2, OnHeapProfilingEnabled(true)).Times(1);
-    EXPECT_CALL(mdp2, OnHeapProfilingEnabled(false)).Times(1);
-  }
-  RegisterDumpProvider(&mdp3, ThreadTaskRunnerHandle::Get());
-  EXPECT_CALL(mdp3, OnHeapProfilingEnabled(_)).Times(0);
-
-  EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
-  RunLoop().RunUntilIdle();
-  ASSERT_EQ(AllocationContextTracker::CaptureMode::PSEUDO_STACK,
-            AllocationContextTracker::capture_mode());
-  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModePseudo);
-  EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
-  RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get(), supported_options);
-
-  TraceConfig::MemoryDumpConfig config;
-  config.heap_profiler_options.breakdown_threshold_bytes = 100;
-  mdm_->SetupForTracing(config);
-  EXPECT_EQ(config.heap_profiler_options.breakdown_threshold_bytes,
-            mdm_->heap_profiler_serialization_state_for_testing()
-                ->heap_profiler_breakdown_threshold_bytes());
-  EXPECT_TRUE(
-      mdm_->heap_profiler_serialization_state_for_testing()->is_initialized());
-  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModePseudo);
-  mdm_->TeardownForTracing();
-  EXPECT_FALSE(mdm_->heap_profiler_serialization_state_for_testing());
-
-  // Disable will permanently disable heap profiling.
-  EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
-  RunLoop().RunUntilIdle();
-  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
-  EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
-  EXPECT_FALSE(mdm_->heap_profiler_serialization_state_for_testing());
-  ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
-            AllocationContextTracker::capture_mode());
-  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
-  ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
-            AllocationContextTracker::capture_mode());
-  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
-  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
-  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
-}
-
-TEST_F(MemoryDumpManagerTestAsCoordinator, EnableHeapProfilingBackground) {
-  MockMemoryDumpProvider mdp1;
-  MemoryDumpProvider::Options supported_options;
-  supported_options.supports_heap_profiling = true;
-  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), supported_options);
-  testing::InSequence sequence;
-  EXPECT_CALL(mdp1, OnHeapProfilingEnabled(true)).Times(1);
-  EXPECT_CALL(mdp1, OnHeapProfilingEnabled(false)).Times(1);
-
-  // Enable tracing before heap profiling.
-  TraceConfig::MemoryDumpConfig config;
-  config.heap_profiler_options.breakdown_threshold_bytes = 100;
-  mdm_->SetupForTracing(config);
-  EXPECT_EQ(config.heap_profiler_options.breakdown_threshold_bytes,
-            mdm_->heap_profiler_serialization_state_for_testing()
-                ->heap_profiler_breakdown_threshold_bytes());
-  EXPECT_FALSE(
-      mdm_->heap_profiler_serialization_state_for_testing()->is_initialized());
-
-  EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeBackground));
-  RunLoop().RunUntilIdle();
-  ASSERT_EQ(AllocationContextTracker::CaptureMode::MIXED_STACK,
-            AllocationContextTracker::capture_mode());
-  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeBackground);
-  EXPECT_EQ(0u, TraceLog::GetInstance()->enabled_modes());
-  EXPECT_TRUE(
-      mdm_->heap_profiler_serialization_state_for_testing()->is_initialized());
-  // Do nothing when already enabled.
-  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeBackground));
-  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
-  ASSERT_EQ(AllocationContextTracker::CaptureMode::MIXED_STACK,
-            AllocationContextTracker::capture_mode());
-  EXPECT_EQ(0u, TraceLog::GetInstance()->enabled_modes());
-  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeBackground);
-  // Disable will permanently disable heap profiling.
-  EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
-  RunLoop().RunUntilIdle();
-  ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
-            AllocationContextTracker::capture_mode());
-  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
-  ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
-            AllocationContextTracker::capture_mode());
-  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
-  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
-  RunLoop().RunUntilIdle();
-  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
-  mdm_->TeardownForTracing();
-  EXPECT_FALSE(mdm_->heap_profiler_serialization_state_for_testing());
-}
-
-TEST_F(MemoryDumpManagerTestAsCoordinator, EnableHeapProfilingTask) {
-  MockMemoryDumpProvider mdp1;
-  MockMemoryDumpProvider mdp2;
-  MemoryDumpProvider::Options supported_options;
-  supported_options.supports_heap_profiling = true;
-  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), supported_options);
-  EXPECT_CALL(mdp1, OnHeapProfilingEnabled(_)).Times(0);
-  EXPECT_CALL(mdp2, OnHeapProfilingEnabled(_)).Times(0);
-
-  ASSERT_FALSE(base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled());
-  EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeTaskProfiler));
-  RunLoop().RunUntilIdle();
-  ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
-            AllocationContextTracker::capture_mode());
-  RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get(), supported_options);
-  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeTaskProfiler);
-  ASSERT_TRUE(debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled());
-  TestingThreadHeapUsageTracker::DisableHeapTrackingForTesting();
-  ASSERT_FALSE(base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled());
-}
-
-TEST_F(MemoryDumpManagerTestAsCoordinator, EnableHeapProfilingDisableDisabled) {
-  ASSERT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeDisabled);
-  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
-  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
-}
-#endif  //  BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
-
 // Mock MDP class that tests if the number of OnMemoryDump() calls are expected.
 // It is implemented without gmocks since EXPECT_CALL implementation is slow
 // when there are 1000s of instances, as required in
diff --git a/tools/gn/bootstrap/bootstrap.py b/tools/gn/bootstrap/bootstrap.py
index 5cbb010..cbbce40 100755
--- a/tools/gn/bootstrap/bootstrap.py
+++ b/tools/gn/bootstrap/bootstrap.py
@@ -226,14 +226,6 @@
   root_gen_dir = os.path.join(tempdir, 'gen')
   mkdir_p(root_gen_dir)
 
-  write_buildflag_header_manually(
-      root_gen_dir,
-      'base/synchronization/synchronization_buildflags.h',
-      {'ENABLE_MUTEX_PRIORITY_INHERITANCE': 'false'})
-
-  write_buildflag_header_manually(root_gen_dir, 'base/allocator/buildflags.h',
-      {'USE_ALLOCATOR_SHIM': 'true' if is_linux else 'false'})
-
   write_buildflag_header_manually(root_gen_dir,
                                   'base/debug/debugging_buildflags.h',
       {